def simple_if_else(cond):
    """Build an scf.if with an else-branch where each branch yields two i32
    values, then consume both results with an arith.addi."""
    if_op = scf.IfOp(cond, [i32, i32], hasElse=True)
    # Then-branch yields (0, 1).
    with InsertionPoint(if_op.then_block):
        then_a = arith.ConstantOp(i32, 0)
        then_b = arith.ConstantOp(i32, 1)
        scf.YieldOp([then_a, then_b])
    # Else-branch yields (2, 3).
    with InsertionPoint(if_op.else_block):
        else_a = arith.ConstantOp(i32, 2)
        else_b = arith.ConstantOp(i32, 3)
        scf.YieldOp([else_a, else_b])
    # Use both scf.if results so they appear in the emitted IR.
    arith.AddIOp(if_op.results[0], if_op.results[1])
    return
def testConstantOp():
    """Verify literal_value on integer and float arith.constant ops."""
    constants = [
        arith.ConstantOp(IntegerType.get_signless(32), 42),
        arith.ConstantOp(IntegerType.get_signless(64), 100),
        arith.ConstantOp(F32Type.get(), 3.14),
        arith.ConstantOp(F64Type.get(), 1.23),
    ]
    # CHECK: 42
    # CHECK: 100
    # CHECK: 3.140000104904175
    # CHECK: 1.23
    for const_op in constants:
        print(const_op.literal_value)
def simple_if(cond):
    """Build an scf.if (no else) whose body computes 1 + 1 and yields nothing."""
    if_op = scf.IfOp(cond)
    with InsertionPoint(if_op.then_block):
        unit = arith.ConstantOp(i32, 1)
        arith.AddIOp(unit, unit)
        scf.YieldOp([])
    return
def testConstantOps():
    """Emit a single f32 arith.constant into a fresh module and print it."""
    with Context(), Location.unknown():
        module = Module.create()
        with InsertionPoint(module.body):
            arith.ConstantOp(value=42.42, result=F32Type.get())
        # CHECK: %cst = arith.constant 4.242000e+01 : f32
        print(module)
def fill_tensor(out):
    """Fill the given tensor with 0.0 and return the resulting tensor value."""
    cst_zero = arith.ConstantOp(value=FloatAttr.get(f32, 0.0), result=f32).result
    # TODO: FillOp.result is None. When len(results) == 1 we expect it to
    # be results[0] as per _linalg_ops_gen.py. This seems like an
    # orthogonal bug in the generator of _linalg_ops_gen.py.
    return linalg.FillOp(output=out, value=cst_zero).results[0]
def testVectorConstantOp():
    """literal_value must raise ValueError for a non-scalar (vector) constant."""
    elem_type = IntegerType.get_signless(32)
    vec_type = VectorType.get([2, 2], elem_type)
    splat = DenseElementsAttr.get_splat(vec_type, IntegerAttr.get(elem_type, 42))
    vec_const = arith.ConstantOp(vec_type, splat)
    try:
        print(vec_const.literal_value)
    except ValueError as e:
        assert "only integer and float constants have literal values" in str(e)
    else:
        assert False
def pass_an_op_directly(arg0, arg1):
    """Check that linalg ops accept Operation objects (not just Values) as
    operands: fill both inputs with 1.0 and feed the fills into a matmul."""
    unit = arith.ConstantOp(F32Type.get(), 1.0)
    # CHECK: %[[LHS:.*]] = linalg.fill
    filled_lhs = linalg.FillOp(arg0, unit)
    # CHECK: %[[RHS:.*]] = linalg.fill
    filled_rhs = linalg.FillOp(arg1, unit)
    # CHECK: %[[INIT:.*]] = linalg.init_tensor
    acc = linalg.InitTensorOp([4, 8], f32)
    # CHECK: linalg.matmul
    # CHECK: ins(%[[LHS]], %[[RHS]]
    # CHECK: outs(%[[INIT]]
    return linalg.matmul(filled_lhs, filled_rhs, outs=acc)
def fill_buffer(out):
    """Fill the given buffer with 0.0 in place (no result returned)."""
    cst_zero = arith.ConstantOp(value=FloatAttr.get(f32, 0.0), result=f32).result
    linalg.FillOp(output=out, value=cst_zero)
def fill_tensor(out):
    """Fill the given tensor with 0.0 and return the FillOp's single result."""
    cst_zero = arith.ConstantOp(value=FloatAttr.get(f32, 0.0), result=f32).result
    return linalg.FillOp(output=out, value=cst_zero).result
def tensor_static_dim(t):
    """Return the first two dimension sizes of tensor `t` as tensor.dim values.

    Op-creation order matters for the emitted IR: both index constants are
    built first, then both dim ops, mirroring the original construction order.
    """
    index_consts = [arith.ConstantOp(indexType, i) for i in range(2)]
    dim_ops = [tensor.DimOp(t, c) for c in index_consts]
    return [d.result for d in dim_ops]
def fill_buffer(out):
    """Fill the given buffer with 0.0 in place via the linalg.fill builder."""
    cst_zero = arith.ConstantOp(value=FloatAttr.get(f32, 0.0), result=f32).result
    linalg.fill(cst_zero, outs=[out])
def fill_tensor(out):
    """Fill the given tensor with 0.0 via linalg.fill and return its result."""
    cst_zero = arith.ConstantOp(value=FloatAttr.get(f32, 0.0), result=f32).result
    return linalg.fill(cst_zero, outs=[out])