def pass_an_op_directly(arg0, arg1): one = arith.ConstantOp(F32Type.get(), 1.0) # CHECK: %[[LHS:.*]] = linalg.fill lhs = linalg.FillOp(arg0, one) # CHECK: %[[RHS:.*]] = linalg.fill rhs = linalg.FillOp(arg1, one) # CHECK: %[[INIT:.*]] = linalg.init_tensor init = linalg.InitTensorOp([4, 8], f32) # CHECK: linalg.matmul # CHECK: ins(%[[LHS]], %[[RHS]] # CHECK: outs(%[[INIT]] return linalg.matmul(lhs, rhs, outs=init)
def fill_tensor(out):
    """Fill the tensor `out` with 0.0 and return the filled tensor value."""
    cst_zero = std.ConstantOp(value=FloatAttr.get(f32, 0.), result=f32).result
    fill_op = linalg.FillOp(output=out, value=cst_zero)
    # TODO: FillOp.result is None. When len(results) == 1 we expect it to
    # be results[0] as per _linalg_ops_gen.py. This seems like an
    # orthogonal bug in the generator of _linalg_ops_gen.py.
    return fill_op.results[0]
def matmul_on_tensors(*outer_args):
    """Zero-fill the last argument, then invoke `op` on the remaining
    arguments with the filled tensor as the `outs` operand.
    """
    # TODO: in the future, should be writeable more concisely as:
    #   zero = std.constant(0.0, elem_type)
    #   tmp = linalg.fill(out, zero)
    #   linalg.matmul(lhs, rhs, tmp)
    cst_zero = std.ConstantOp(
        value=FloatAttr.get(return_elem_type, 0.), result=return_elem_type
    ).result
    *ins_args, out_arg = outer_args
    filled = linalg.FillOp(output=out_arg, value=cst_zero).results[0]
    return op(*ins_args, outs=[filled])
def fill_buffer(out):
    """Fill the buffer `out` with 0.0 in place; nothing is returned."""
    fill_value = std.ConstantOp(value=FloatAttr.get(f32, 0.), result=f32).result
    linalg.FillOp(output=out, value=fill_value)
def fill_tensor(out):
    """Fill the tensor `out` with 0.0 and return the result tensor value."""
    const_op = arith.ConstantOp(value=FloatAttr.get(f32, 0.), result=f32)
    return linalg.FillOp(output=out, value=const_op.result).result