Example #1
def testBuildFuncOp():
    ctx = Context()
    with Location.unknown(ctx) as loc:
        m = builtin.ModuleOp()

        f32 = F32Type.get()
        tensor_type = RankedTensorType.get((2, 3, 4), f32)
        with InsertionPoint.at_block_begin(m.body):
            func = builtin.FuncOp(name="some_func",
                                  type=FunctionType.get(
                                      inputs=[tensor_type, tensor_type],
                                      results=[tensor_type]),
                                  visibility="nested")
            # CHECK: Name is: "some_func"
            print("Name is: ", func.name)

            # CHECK: Type is: (tensor<2x3x4xf32>, tensor<2x3x4xf32>) -> tensor<2x3x4xf32>
            print("Type is: ", func.type)

            # CHECK: Visibility is: "nested"
            print("Visibility is: ", func.visibility)

            try:
                entry_block = func.entry_block
            except IndexError as e:
                # CHECK: External function does not have a body
                print(e)

            with InsertionPoint(func.add_entry_block()):
                std.ReturnOp([func.entry_block.arguments[0]])

            try:
                func.add_entry_block()
            except IndexError as e:
                # CHECK: The function already has an entry block!
                print(e)

            # Try the callback builder and passing type as tuple.
            func = builtin.FuncOp(name="some_other_func",
                                  type=([tensor_type,
                                         tensor_type], [tensor_type]),
                                  visibility="nested",
                                  body_builder=lambda func: std.ReturnOp(
                                      [func.entry_block.arguments[0]]))

    # CHECK: module  {
    # CHECK:  func nested @some_func(%arg0: tensor<2x3x4xf32>, %arg1: tensor<2x3x4xf32>) -> tensor<2x3x4xf32> {
    # CHECK:   return %arg0 : tensor<2x3x4xf32>
    # CHECK:  }
    # CHECK:  func nested @some_other_func(%arg0: tensor<2x3x4xf32>, %arg1: tensor<2x3x4xf32>) -> tensor<2x3x4xf32> {
    # CHECK:   return %arg0 : tensor<2x3x4xf32>
    # CHECK:  }
    print(m)
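
None of these examples show their imports. A minimal sketch of what Example #1 needs to run standalone, assuming the upstream MLIR Python bindings from the era when the `std` dialect still existed (the package prefix may differ depending on how the bindings are installed):

# Assumed import layout; adjust the package prefix to your build.
from mlir.ir import (Context, Location, InsertionPoint, F32Type,
                     RankedTensorType, FunctionType)
from mlir.dialects import builtin, std

testBuildFuncOp()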
Example #2
def testFuncArgumentAccess():
    with Context() as ctx, Location.unknown():
        ctx.allow_unregistered_dialects = True
        module = Module.create()
        f32 = F32Type.get()
        f64 = F64Type.get()
        with InsertionPoint(module.body):
            func = builtin.FuncOp("some_func", ([f32, f32], [f32, f32]))
            with InsertionPoint(func.add_entry_block()):
                std.ReturnOp(func.arguments)
            func.arg_attrs = ArrayAttr.get([
                DictAttr.get({
                    "custom_dialect.foo": StringAttr.get("bar"),
                    "custom_dialect.baz": UnitAttr.get()
                }),
                DictAttr.get({"custom_dialect.qux": ArrayAttr.get([])})
            ])
            func.result_attrs = ArrayAttr.get([
                DictAttr.get({"custom_dialect.res1": FloatAttr.get(f32,
                                                                   42.0)}),
                DictAttr.get(
                    {"custom_dialect.res2": FloatAttr.get(f64, 256.0)})
            ])

            other = builtin.FuncOp("other_func", ([f32, f32], []))
            with InsertionPoint(other.add_entry_block()):
                std.ReturnOp([])
            other.arg_attrs = [
                DictAttr.get({"custom_dialect.foo": StringAttr.get("qux")}),
                DictAttr.get()
            ]

    # CHECK: [{custom_dialect.baz, custom_dialect.foo = "bar"}, {custom_dialect.qux = []}]
    print(func.arg_attrs)

    # CHECK: [{custom_dialect.res1 = 4.200000e+01 : f32}, {custom_dialect.res2 = 2.560000e+02 : f64}]
    print(func.result_attrs)

    # CHECK: func @some_func(
    # CHECK: %[[ARG0:.*]]: f32 {custom_dialect.baz, custom_dialect.foo = "bar"},
    # CHECK: %[[ARG1:.*]]: f32 {custom_dialect.qux = []}) ->
    # CHECK: f32 {custom_dialect.res1 = 4.200000e+01 : f32},
    # CHECK: f32 {custom_dialect.res2 = 2.560000e+02 : f64})
    # CHECK: return %[[ARG0]], %[[ARG1]] : f32, f32
    #
    # CHECK: func @other_func(
    # CHECK: %{{.*}}: f32 {custom_dialect.foo = "qux"},
    # CHECK: %{{.*}}: f32)
    print(module)
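
`arg_attrs` and `result_attrs` round-trip as an ArrayAttr of DictAttr entries, so individual attributes can be read back by position and key. A small sketch continuing from the `func` built above, assuming the bindings' ArrayAttr indexing and DictAttr key lookup:

# Hedged sketch: downcast the first per-argument dictionary and read one entry.
first_arg_attrs = DictAttr(func.arg_attrs[0])
print(first_arg_attrs["custom_dialect.foo"])  # expected: "bar"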
Example #3
  def trace(self):
    # Invoke the python function with placeholders.
    # TODO: More sophisticated signature merging
    # TODO: Multiple results
    # TODO: Error reporting
    ic = self._ic
    ic.insert_end_of_block(self.entry_block)
    with ic.context:
      py_results = (self.epf.pyfunc(*self._python_args),)
      if len(py_results) != len(self._f_types):
        raise TracingError("Traced function returned != %d results: %r" % (
            len(self._f_types),
            py_results,
        ))

    # Narrow all results to the declared return types.
    return_operands = []
    for py_result, mlir_result_type in zip(py_results, self._f_types):
      mlir_result = self.get_traced_array_value(py_result)
      # narrow to declared result type.
      return_operands.extend(
          numpy_ops.NarrowOp(mlir_result_type,
                             mlir_result,
                             loc=ic.loc,
                             ip=ic.ip).results)
    std_ops.ReturnOp(return_operands, loc=ic.loc, ip=ic.ip)
    ic.pop_ip()
Example #4
def testBlockCreation():
  with Context() as ctx, Location.unknown():
    module = Module.create()
    with InsertionPoint(module.body):
      f_type = FunctionType.get(
          [IntegerType.get_signless(32),
           IntegerType.get_signless(16)], [])
      f_op = builtin.FuncOp("test", f_type)
      entry_block = f_op.add_entry_block()
      i32_arg, i16_arg = entry_block.arguments
      successor_block = entry_block.create_after(i32_arg.type)
      with InsertionPoint(successor_block) as successor_ip:
        assert successor_ip.block == successor_block
        std.ReturnOp([])
      middle_block = successor_block.create_before(i16_arg.type)

      with InsertionPoint(entry_block) as entry_ip:
        assert entry_ip.block == entry_block
        cf.BranchOp([i16_arg], dest=middle_block)

      with InsertionPoint(middle_block) as middle_ip:
        assert middle_ip.block == middle_block
        cf.BranchOp([i32_arg], dest=successor_block)
    print(module.operation)
    # Ensure region back references are coherent.
    assert entry_block.region == middle_block.region == successor_block.region
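
Blocks can also be appended to a region directly, which Example #11 below relies on. A minimal sketch continuing from `f_op` above, assuming `blocks.append` accepts block-argument types as varargs:

# Append one more (unreachable) block with an i32 argument to the function's
# only region and give it a terminator.
region = f_op.operation.regions[0]
extra_block = region.blocks.append(IntegerType.get_signless(32))
with InsertionPoint(extra_block):
  std.ReturnOp([])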
Example #5
def testTransferReadOp():
    module = Module.create()
    with InsertionPoint(module.body):
        vector_type = VectorType.get([2, 3], F32Type.get())
        memref_type = MemRefType.get([-1, -1], F32Type.get())
        index_type = IndexType.get()
        mask_type = VectorType.get(vector_type.shape,
                                   IntegerType.get_signless(1))
        identity_map = AffineMap.get_identity(vector_type.rank)
        identity_map_attr = AffineMapAttr.get(identity_map)
        func = builtin.FuncOp(
            "transfer_read",
            ([memref_type, index_type,
              F32Type.get(), mask_type], []))
        with InsertionPoint(func.add_entry_block()):
            A, zero, padding, mask = func.arguments
            vector.TransferReadOp(vector_type, A, [zero, zero],
                                  identity_map_attr, padding, mask, None)
            vector.TransferReadOp(vector_type, A, [zero, zero],
                                  identity_map_attr, padding, None, None)
            std.ReturnOp([])

    # CHECK: @transfer_read(%[[MEM:.*]]: memref<?x?xf32>, %[[IDX:.*]]: index,
    # CHECK: %[[PAD:.*]]: f32, %[[MASK:.*]]: vector<2x3xi1>)
    # CHECK: vector.transfer_read %[[MEM]][%[[IDX]], %[[IDX]]], %[[PAD]], %[[MASK]]
    # CHECK: vector.transfer_read %[[MEM]][%[[IDX]], %[[IDX]]], %[[PAD]]
    # CHECK-NOT: %[[MASK]]
    print(module)
Example #6
 def build(self, types: List[ir.Type]):
     """Builds the ir.Module.  The module has only the @main function,
     which will convert the input through the list of types and then back
     to the initial type.  The roundtrip type must be a dense tensor."""
     assert self._module is None, 'StressTest: must not call build() repeatedly'
     self._module = ir.Module.create()
     with ir.InsertionPoint(self._module.body):
         tp0 = types.pop(0)
         self._roundtripTp = tp0
         # TODO: assert dense? assert element type is recognised by the TypeConverter?
         types.append(tp0)
         funcTp = ir.FunctionType.get(inputs=[tp0], results=[tp0])
         funcOp = builtin.FuncOp(name='main', type=funcTp)
         funcOp.attributes['llvm.emit_c_interface'] = ir.UnitAttr.get()
         with ir.InsertionPoint(funcOp.add_entry_block()):
             arg0 = funcOp.entry_block.arguments[0]
             self._assertEqualsRoundtripTp(arg0.type)
             v = st.ConvertOp(types.pop(0), arg0)
             for tp in types:
                 w = st.ConvertOp(tp, v)
                 # Release intermediate tensors before they fall out of scope.
                 st.ReleaseOp(v.result)
                 v = w
             self._assertEqualsRoundtripTp(v.result.type)
             std.ReturnOp(v)
     return self
Example #7
 def visit_Return(self, ast_node):
     ic = self.fctx.ic
     with ic.loc, ic.ip:
         expr = ExpressionImporter(self.fctx)
         expr.visit(ast_node.value)
         casted = basicpy_ops.UnknownCastOp(ic.unknown_type,
                                            expr.value).result
         std_ops.ReturnOp([casted])
         self._last_was_return = True
Example #8
def testFunctionCalls():
    foo = builtin.FuncOp("foo", ([], []))
    bar = builtin.FuncOp("bar", ([], [IndexType.get()]))
    qux = builtin.FuncOp("qux", ([], [F32Type.get()]))

    with InsertionPoint(builtin.FuncOp("caller", ([], [])).add_entry_block()):
        std.CallOp(foo, [])
        std.CallOp([IndexType.get()], "bar", [])
        std.CallOp([F32Type.get()], FlatSymbolRefAttr.get("qux"), [])
        std.ReturnOp([])
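
Unlike Example #1, this test creates neither a Context nor a Module of its own, so it relies on an ambient Context, Location, and insertion point (the upstream MLIR tests supply these through a small decorator). A sketch of one way to provide them, with the package prefix assumed:

from mlir.ir import Context, Location, Module, InsertionPoint

with Context(), Location.unknown():
    module = Module.create()
    with InsertionPoint(module.body):
        # Every FuncOp and call built by testFunctionCalls lands in `module`.
        testFunctionCalls()
    print(module)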
Example #9
def testFirstBlockCreation():
  with Context() as ctx, Location.unknown():
    module = Module.create()
    f32 = F32Type.get()
    with InsertionPoint(module.body):
      func = builtin.FuncOp("test", ([f32], []))
      entry_block = Block.create_at_start(func.operation.regions[0], [f32])
      with InsertionPoint(entry_block):
        std.ReturnOp([])

    print(module)
    assert module.operation.verify()
    assert func.body.blocks[0] == entry_block
Example #10
def testOpsAsArguments():
    index_type = IndexType.get()
    callee = builtin.FuncOp("callee", ([], [index_type, index_type]),
                            visibility="private")
    func = builtin.FuncOp("ops_as_arguments", ([], []))
    with InsertionPoint(func.add_entry_block()):
        lb = arith.ConstantOp.create_index(0)
        ub = arith.ConstantOp.create_index(42)
        step = arith.ConstantOp.create_index(2)
        iter_args = std.CallOp(callee, [])
        loop = scf.ForOp(lb, ub, step, iter_args)
        with InsertionPoint(loop.body):
            scf.YieldOp(loop.inner_iter_args)
        std.ReturnOp([])
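
The builders above accept op objects directly where values are expected: single-result ops contribute their result and the multi-result CallOp contributes all of its results. An equivalent, more explicit spelling of the loop construction using the .result/.results accessors:

# Explicit form; behavior should match the implicit version above.
loop = scf.ForOp(lb.result, ub.result, step.result, list(iter_args.results))
with InsertionPoint(loop.body):
    scf.YieldOp(list(loop.inner_iter_args))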
Example #11
def build_matmul_tensors_func(func_name, m, k, n, dtype):
    lhs_type = RankedTensorType.get([m, k], dtype)
    rhs_type = RankedTensorType.get([k, n], dtype)
    result_type = RankedTensorType.get([m, n], dtype)
    # TODO: There should be a one-liner for this.
    func_type = FunctionType.get([lhs_type, rhs_type], [result_type])
    # Note: this FuncOp is a helper that returns a (func_op, entry_block) pair,
    # unlike builtin.FuncOp used in the other examples.
    _, entry = FuncOp(func_name, func_type)
    lhs, rhs = entry.arguments
    with InsertionPoint(entry):
        op = linalg.MatmulOp([lhs, rhs], results=[result_type])
        # TODO: Implement support for SingleBlockImplicitTerminator
        block = op.regions[0].blocks.append()
        with InsertionPoint(block):
            linalg.YieldOp(values=[])
        std.ReturnOp([op.result])
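
A usage sketch for the builder above, assuming it runs under an active Context and Location with an insertion point on a module body, and that its FuncOp helper inserts at that ambient insertion point:

# Hypothetical driver; the surrounding setup is an assumption, not part of the
# original example.
with Context(), Location.unknown():
    module = Module.create()
    with InsertionPoint(module.body):
        build_matmul_tensors_func("matmul_4x16x8", 4, 16, 8, F32Type.get())
    print(module)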
Example #12
 def import_body(self):
     ic = self.fctx.ic
     for ast_stmt in self.ast_fd.body:
         self._last_was_return = False
         logging.debug("STMT: {}",
                       ast.dump(ast_stmt, include_attributes=True))
         self.visit(ast_stmt)
     if not self._last_was_return:
         # Add a default terminator.
         none_value = basicpy_ops.SingletonOp(ic.none_type,
                                              loc=ic.loc,
                                              ip=ic.ip).result
         none_cast = basicpy_ops.UnknownCastOp(ic.unknown_type,
                                               none_value,
                                               loc=ic.loc,
                                               ip=ic.ip).result
         std_ops.ReturnOp([none_cast], loc=ic.loc, ip=ic.ip)
Example #13
def testStructuredOpOnBuffers():
    with Context() as ctx, Location.unknown():
        module = Module.create()
        f32 = F32Type.get()
        memref_type = MemRefType.get((2, 3, 4), f32)
        with InsertionPoint.at_block_terminator(module.body):
            func = builtin.FuncOp(name="matmul_test",
                                  type=FunctionType.get(inputs=[
                                      memref_type, memref_type, memref_type
                                  ],
                                                        results=[]))
            with InsertionPoint(func.add_entry_block()):
                lhs, rhs, result = func.entry_block.arguments
                linalg.MatmulOp([lhs, rhs], outputs=[result])
                std.ReturnOp([])

    # CHECK: linalg.matmul ins(%arg0, %arg1 : memref<2x3x4xf32>, memref<2x3x4xf32>) outs(%arg2 : memref<2x3x4xf32>)
    print(module)
Example #14
def testStructuredOpOnTensors():
    with Context() as ctx, Location.unknown():
        module = Module.create()
        f32 = F32Type.get()
        tensor_type = RankedTensorType.get((2, 3, 4), f32)
        with InsertionPoint(module.body):
            func = builtin.FuncOp(name="matmul_test",
                                  type=FunctionType.get(
                                      inputs=[tensor_type, tensor_type],
                                      results=[tensor_type]))
            with InsertionPoint(func.add_entry_block()):
                lhs, rhs = func.entry_block.arguments
                result = linalg.MatmulOp([lhs, rhs],
                                         results=[tensor_type]).result
                std.ReturnOp([result])

    # CHECK: %[[R:.*]] = linalg.matmul ins(%arg0, %arg1 : tensor<2x3x4xf32>, tensor<2x3x4xf32>) -> tensor<2x3x4xf32>
    print(module)
Example #15
def emit_benchmark_wrapped_main_func(func, timer_func):
    """Takes a function and a timer function, both represented as FuncOp
    objects, and returns a new function. This new function wraps the call to
    the original function between calls to the timer_func and this wrapping
    in turn is executed inside a loop. The loop is executed
    len(func.type.results) times. This function can be used to create a
    "time measuring" variant of a function.
    """
    i64_type = ir.IntegerType.get_signless(64)
    memref_of_i64_type = ir.MemRefType.get([-1], i64_type)
    wrapped_func = builtin.FuncOp(
        # Same signature plus an extra i64 memref in which to store timings.
        "main",
        (func.arguments.types + [memref_of_i64_type], func.type.results),
        visibility="public")
    wrapped_func.attributes["llvm.emit_c_interface"] = ir.UnitAttr.get()

    num_results = len(func.type.results)
    with ir.InsertionPoint(wrapped_func.add_entry_block()):
        timer_buffer = wrapped_func.arguments[-1]
        zero = arith.ConstantOp.create_index(0)
        n_iterations = memref.DimOp(ir.IndexType.get(), timer_buffer, zero)
        one = arith.ConstantOp.create_index(1)
        iter_args = list(wrapped_func.arguments[-num_results - 1:-1])
        loop = scf.ForOp(zero, n_iterations, one, iter_args)
        with ir.InsertionPoint(loop.body):
            start = std.CallOp(timer_func, [])
            call = std.CallOp(
                func, wrapped_func.arguments[:-num_results - 1] +
                loop.inner_iter_args)
            end = std.CallOp(timer_func, [])
            time_taken = arith.SubIOp(end, start)
            memref.StoreOp(time_taken, timer_buffer, [loop.induction_variable])
            scf.YieldOp(list(call.results))
        std.ReturnOp(loop)

    return wrapped_func
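
A usage sketch, assuming `benchmarked_func` and `get_time_ns` are hypothetical builtin.FuncOp symbols already present in `module` (the timer returning an i64 timestamp), and that the wrapper is emitted at module scope:

# Hypothetical names; only emit_benchmark_wrapped_main_func comes from above.
with ir.InsertionPoint(module.body):
    main_func = emit_benchmark_wrapped_main_func(benchmarked_func, get_time_ns)
print(module)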
Example #16
# OpDSL definition of `matmul` used below; in upstream MLIR this requires
# `from mlir.dialects.linalg.opdsl.lang import *` for linalg_structured_op,
# TensorDef, S, D, and T.
@linalg_structured_op
def matmul(A=TensorDef(T, S.M, S.K),
           B=TensorDef(T, S.K, S.N),
           C=TensorDef(T, S.M, S.N, output=True)):
    C[D.m, D.n] += A[D.m, D.k] * B[D.k, D.n]


with Context() as ctx, Location.unknown():
    module = Module.create()
    f32 = F32Type.get()
    lhs_type = RankedTensorType.get((4, 16), f32)
    rhs_type = RankedTensorType.get((16, 8), f32)
    result_type = NoneType.get()
    with InsertionPoint.at_block_terminator(module.body):
        func = builtin.FuncOp(name="matmul_test",
                              type=FunctionType.get(
                                  inputs=[lhs_type, rhs_type],
                                  results=[result_type]))
        with InsertionPoint(func.add_entry_block()):
            lhs, rhs = func.entry_block.arguments
            results = matmul(lhs, rhs)
            std.ReturnOp(results)
            # Rewrite the function return type now that we know.
            # TODO: Have an API or a setter for this.
            func.attributes["type"] = TypeAttr.get(
                FunctionType.get(func.type.inputs, [r.type for r in results]))

# TODO: This is not right yet.
# CHECK-LABEL: func @matmul_test
# CHECK:      %0 = linalg.generic {indexing_maps = [], iterator_types = []}
# CHECK-SAME:   ins(%arg0, %arg1 : tensor<4x16xf32>, tensor<16x8xf32>)
# CHECK-SAME:   outs(%arg0 : tensor<4x16xf32>) -> tensor<?x?xf32>
print(module)