def generic_form(lhs, rhs):
    """Build a matmul and force emission as linalg.generic (FileCheck-verified)."""
    acc = linalg.InitTensorOp([4, 8], f32)
    # CHECK: linalg.generic
    return linalg.matmul(lhs, rhs, outs=[acc.result], emit_generic=True)
def testInitTensorStaticSizesAttribute():
    """Print the 'static_sizes' attribute of a linalg.InitTensorOp (FileCheck-verified)."""
    with Context() as ctx, Location.unknown():
        module = Module.create()
        f32 = F32Type.get()
        with InsertionPoint(module.body):
            init_op = linalg.InitTensorOp([3, 4], f32)
            # CHECK: [3, 4]
            print(init_op.attributes['static_sizes'])
def named_form(lhs, rhs):
    """Build a named matmul; CHECK lines assert its generic-form body (std dialect)."""
    out_init = linalg.InitTensorOp([4, 8], f32)
    # CHECK: "linalg.matmul"(%{{.*}})
    # CHECK-NEXT: ^bb0(%{{.*}}: f32, %{{.*}}: f32, %{{.*}}: f32):
    # CHECK-NEXT: std.mulf{{.*}} (f32, f32) -> f32
    # CHECK-NEXT: std.addf{{.*}} (f32, f32) -> f32
    # CHECK-NEXT: linalg.yield{{.*}} (f32) -> ()
    # CHECK-NEXT: {linalg.memoized_indexing_maps{{.*}}operand_segment_sizes = dense<[2, 1]> : vector<2xi32>} :
    # CHECK-SAME: (tensor<4x16xf32>, tensor<16x8xf32>, tensor<4x8xf32>) -> tensor<4x8xf32>
    return linalg.matmul(lhs, rhs, outs=[out_init.result])
def named_form(lhs, rhs):
    """Build a named matmul; CHECK lines assert the custom (pretty) print format."""
    out_init = linalg.InitTensorOp([4, 8], f32)
    # First check the named form with custom format
    # CHECK: linalg.matmul
    # CHECK-NOT: linalg.memoized_indexing_maps
    # CHECK-SAME: ins(%{{.*}} : tensor<4x16xf32>, tensor<16x8xf32>)
    # CHECK-SAME: outs(%{{.*}} : tensor<4x8xf32>)
    # CHECK-SAME: -> tensor<4x8xf32>
    # CHECK-NEXT: return
    return linalg.matmul(lhs, rhs, outs=[out_init.result])
def named_form(lhs, rhs):
    """Build a named matmul; CHECK lines assert its generic-form body (arith dialect)."""
    out_init = linalg.InitTensorOp([4, 8], f32)
    # CHECK: "linalg.matmul"(%{{.*}})
    # CHECK-NEXT: ^bb0(%{{.*}}: f32, %{{.*}}: f32, %{{.*}}: f32):
    # CHECK-NEXT: arith.mulf{{.*}} (f32, f32) -> f32
    # CHECK-NEXT: arith.addf{{.*}} (f32, f32) -> f32
    # CHECK-NEXT: linalg.yield{{.*}} (f32) -> ()
    # CHECK-NEXT: cast = #linalg.type_fn<cast_signed>
    # CHECK-SAME: operand_segment_sizes = dense<[2, 1]> : vector<2xi32>
    # CHECK-SAME: (tensor<4x16xf32>, tensor<16x8xf32>, tensor<4x8xf32>) -> tensor<4x8xf32>
    return linalg.matmul(lhs, rhs, outs=[out_init.result])
def pass_an_op_directly(arg0, arg1):
    """Exercise passing op objects (rather than .result values) into linalg builders."""
    cst_one = arith.ConstantOp(F32Type.get(), 1.0)
    # CHECK: %[[LHS:.*]] = linalg.fill
    fill_lhs = linalg.FillOp(arg0, cst_one)
    # CHECK: %[[RHS:.*]] = linalg.fill
    fill_rhs = linalg.FillOp(arg1, cst_one)
    # CHECK: %[[INIT:.*]] = linalg.init_tensor
    init_op = linalg.InitTensorOp([4, 8], f32)
    # CHECK: linalg.matmul
    # CHECK: ins(%[[LHS]], %[[RHS]]
    # CHECK: outs(%[[INIT]]
    return linalg.matmul(fill_lhs, fill_rhs, outs=init_op)
def named_form(lhs, rhs):
    """Build elemwise_unary and elemwise_binary ops; CHECK lines assert their attributes."""
    out_tensor = linalg.InitTensorOp([4, 8], f32)
    # Check for the named form with custom format
    # CHECK: linalg.elemwise_unary
    # CHECK-SAME: cast = #linalg.type_fn<cast_signed>
    # CHECK-SAME: fun = #linalg.unary_fn<exp>
    # CHECK-SAME: ins(%{{.*}} : tensor<4x8xf32>) outs(%{{.*}} : tensor<4x8xf32>)
    exp_res = linalg.elemwise_unary(lhs, outs=[out_tensor.result])
    # CHECK: linalg.elemwise_binary
    # CHECK-SAME: cast = #linalg.type_fn<cast_unsigned>
    # CHECK-SAME: fun = #linalg.binary_fn<mul>
    # CHECK-SAME: ins(%{{.*}}, %{{.*}} : tensor<4x8xf32>, tensor<4x8xf32>) outs(%{{.*}} : tensor<4x8xf32>)
    # CHECK: return
    mul_res = linalg.elemwise_binary(
        lhs,
        rhs,
        outs=[out_tensor.result],
        fun=BinaryFn.mul,
        cast=TypeFn.cast_unsigned)
    return exp_res, mul_res
def test_matmul_mono(lhs, rhs):
    """Invoke matmul_mono (presumably defined elsewhere in this test file — verify) with a fresh 4x8 output tensor."""
    out_init = linalg.InitTensorOp([4, 8], f32)
    return matmul_mono(lhs, rhs, outs=[out_init.result])
def zero_d():
    """Create a zero-dimensional (rank-0) init tensor."""
    op = linalg.InitTensorOp([], f32)
    return op
def dynamic_sizes(d0, d1):
    """Create an init tensor whose two dimensions are given by SSA values d0, d1."""
    op = linalg.InitTensorOp([d0, d1], f32)
    return op
def static_sizes():
    """Create an init tensor with compile-time-constant 3x4 dimensions."""
    op = linalg.InitTensorOp([3, 4], f32)
    return op