def fill_v2_d(y, value, shape, kernel_name="fill_v2_d"):
    """
    interface of fill_v2_d

    :param y: output
    :param value: value to fill the shape, float32
    :param shape: list int, output shape
    :param kernel_name: fill_v2_d
    :return: None
    """
    # check kernel name
    util.check_kernel_name(kernel_name)

    # shape to list
    shape = te.lang.cce.util.shape_to_list(shape)
    util.check_shape_rule(shape)

    # pseudo input, won't be used
    data_x = tvm.placeholder(shape, dtype="float32", name="data_x")

    # do compute
    res = fill_v2_compute(data_x, value, shape, y, kernel_name)

    # new schedule
    schedule = [tvm.create_schedule(res.op)]
    elewise_sch = te.lang.cce.te_schedule.cce_schedule.ElewiseSchedule()
    elewise_sch._get_emit_insn_map = types.MethodType(_get_emit_insn_map,
                                                      elewise_sch)
    elewise_sch._do_buffer_tile = types.MethodType(_do_buffer_tile,
                                                   elewise_sch)
    elewise_sch.do_schedule([res], schedule, [])
    schedule = schedule[0]
    schedule.cce_special = {"tensor_list": (),
                            "orign_out_tensor": [res],
                            "real_out_tensor": [res]}

    # build operator
    config = {"name": kernel_name, "tensor_list": (data_x, res)}
    te.lang.cce.cce_build_code(schedule, config)

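# Illustrative invocation sketch (not part of the original source): shows
# how fill_v2_d would be called, assuming the TE/TVM environment above is
# initialized. The output dict, shape and kernel name are hypothetical.
def _example_fill_v2_d():
    y = {"shape": (2, 16), "dtype": "float32"}
    # fill a 2x16 float32 tensor with the value 1.0
    fill_v2_d(y, value=1.0, shape=[2, 16], kernel_name="fill_v2_d_2x16")
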
def _unpack_schedule(self, block_tiling_axis, right_dim_in, ub_tiling_axis,
                     split_factor):
    """
    unpack schedule function

    Parameters
    ----------
    block_tiling_axis: int
        identify split axis for multicore
    right_dim_in: tvm.var
        the var identifying right_dim of output_shape
    ub_tiling_axis: int
        identify split axis for ub tiling
    split_factor: tvm.var
        the var identifying split_factor

    Returns
    -------
    sch: tvm.schedule
        the compute schedule
    build_list: list
        include tvm.tensor of input and tvm.tensor of res
    """
    build_list = [self._input_placeholder]
    for res_tensor in self.res_tensor_list:
        build_list.append(res_tensor)

    sch = tvm.create_schedule(self.virtual_node.op)
    sch.disable_allocate(cce_params.scope_ubuf)
    for tensor in self.ub_tensor_list:
        sch[tensor].set_scope(cce_params.scope_ubuf)

    right_dim_outer, right_dim_inner = sch[self.virtual_node].split(
        self.virtual_node.op.axis[block_tiling_axis], factor=right_dim_in)
    sch[self.virtual_node].bind(right_dim_outer, self.block_idx)

    if ub_tiling_axis == 0:
        axis_outer, axis_inner = sch[self.virtual_node].split(
            self.virtual_node.op.axis[0], factor=1)
    else:
        axis_outer, axis_inner = sch[self.virtual_node].split(
            right_dim_inner, factor=split_factor)

    for i in range(self.num):
        sch[self.ub_tensor_list[i]].compute_at(sch[self.virtual_node],
                                               axis_outer)
        sch[self.res_tensor_list[i]].compute_at(sch[self.virtual_node],
                                                axis_outer)
        sch[self.ub_tensor_list[i]].emit_insn(
            self.ub_tensor_list[i].op.axis[ub_tiling_axis],
            insn_cmd.DMA_COPY)
        sch[self.res_tensor_list[i]].emit_insn(
            self.res_tensor_list[i].op.axis[ub_tiling_axis],
            insn_cmd.DMA_COPY)

    sch[self.virtual_node].emit_insn(axis_inner, insn_cmd.PHONY_INSN)

    return sch, build_list

def load_to_l1(input_x, output_x, kernel_name="load_to_l1"):
    """
    copy data from ddr to l1

    Parameters
    ----------
    input_x : dict
        dict of input_x, include keys(shape and dtype)
    output_x : dict
        dict of output_x, include keys(shape and dtype)
    kernel_name : str
        kernel name, default value is "load_to_l1"

    Returns
    -------
    None
    """
    input_shape = input_x.get("shape")
    input_dtype = input_x.get("dtype")

    input_tensor = tvm.placeholder(input_shape,
                                   name="input_tensor",
                                   dtype=input_dtype)

    res = load_to_l1_compute(input_tensor, output_x, kernel_name=kernel_name)

    sch = tvm.create_schedule([res.op])
    sch[res].set_scope(cce.scope_cbuf_fusion)
    sch[res].emit_insn(res.op.axis[0], 'dma_copy')

    tensor_list = [input_tensor, res]
    with build_config:
        tvm.build(sch, tensor_list, "cce", name=kernel_name)

def custom_equal(shape_x, shape_y, dtype, kernel_name="cce_tf_equal",
                 need_build=False, need_print=False):
    """
    do element-wise equal operation between two input tensors

    Parameters:
    ----------
    shape_x : shape of input x
    shape_y : shape of input y
    dtype : source data type, support float16, float32, int32, int8,
        uint8, bool
    kernel_name : cce kernel name, default value is "cce_tf_equal"
    need_build : if need to build CCEC kernel, default value is False
    need_print : if need to print the ir, default value is False

    Returns
    -------
    None
    """
    util.check_kernel_name(kernel_name)
    util.check_shape_rule(shape_x)
    util.check_shape_rule(shape_y)

    check_list = ["float16", "float32", "int32", "int8", "uint8", "bool"]
    dtype = dtype.lower()
    if dtype not in check_list:
        raise RuntimeError(
            "tf_equal_cce only supports %s while dtype is %s" %
            (",".join(check_list), dtype))

    util.check_shape_size(shape_x, SHAPE_SIZE_LIMIT)
    util.check_shape_size(shape_y, SHAPE_SIZE_LIMIT)

    shape_x, shape_y, shape_max = util.produce_shapes(shape_x, shape_y)
    util.check_shape_size(shape_max, SHAPE_SIZE_LIMIT)

    x = tvm.placeholder(shape_x, dtype=dtype, name="x")
    y = tvm.placeholder(shape_y, dtype=dtype, name="y")

    x_tmp = te.lang.cce.broadcast(x, shape_max)
    y_tmp = te.lang.cce.broadcast(y, shape_max)

    res = tvm.compute(shape_max, lambda *i: x_tmp(*i) == y_tmp(*i),
                      name='res')

    sch = tvm.create_schedule(res.op)

    if need_print:
        with build_config:
            print(tvm.lower(sch, [x, y, res], simple_mode=True))
    if need_build:
        with build_config:
            tvm.build(sch, [x, y, res], "cce", name=kernel_name)

def custom_logical_not(shape, dtype, kernel_name="cce_tf_logical_not",
                       need_build=False, need_print=False):
    """
    logical not for the input tensor

    Parameters
    ----------
    shape : input shape of data
    dtype : the data type, support bool
    kernel_name : cce kernel name, default value is "cce_logical_not"
    need_build : if need to build CCEC kernel, default value is False
    need_print : if need to print the ir, default value is False

    Returns
    -------
    None
    """
    util.check_kernel_name(kernel_name)
    util.check_shape_rule(shape)

    check_list = ["bool"]
    if not dtype.lower() in check_list:
        raise RuntimeError(
            "logical_not_cce only supports %s while dtype is %s" %
            (",".join(check_list), dtype))

    util.check_shape_size(shape, SHAPE_SIZE_LIMIT)

    inp_dtype = dtype.lower()
    data = tvm.placeholder(shape, name="data", dtype=inp_dtype)

    with tvm.target.cce():
        # a Python identity test (`is True`) never holds for a TVM
        # expression, so compare the value against 0 to negate the input
        result = tvm.compute(
            shape,
            lambda *i: tvm.select(data(*i) == 0, True, False),
            name="result")

        schedule = tvm.create_schedule(result.op)

    if need_print:
        with build_config:
            print(tvm.lower(schedule, [data, result], simple_mode=True))
    if need_build:
        with build_config:
            tvm.build(schedule, [data, result], "cce", name=kernel_name)

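# Illustrative usage sketch (hypothetical shape and kernel name): lowers
# and prints the IR for a small bool tensor without building, assuming the
# CCE build environment is available.
def _example_custom_logical_not():
    custom_logical_not((8, 128), "bool",
                       kernel_name="cce_logical_not_8x128",
                       need_build=False, need_print=True)
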
def depthwise_weight_6d_2_4d(x, y, src_format, dst_format,
                             kernel_name="depthwise_weight_6d_2_4d"):
    """Operation and Schedule for depthwise_weight_6d_2_4d.

    Parameters
    ----------
    x: shape and dtype of input, the dtype supports float16, float32,
       int32, uint16.
    y: the shape and dtype of output, the dtype same as input.
    src_format: the source data_format
    dst_format: the target data_format
    kernel_name: cce kernel name, default value is
        "depthwise_weight_6d_2_4d"

    Returns
    -------
    convert C1HWNCoC0 to HWCN
    """
    _check_parameters(x, y, src_format, dst_format)
    output_shape = y.get("shape")
    channel_size = output_shape[2]
    input_shape = x.get("shape")
    dtype = x.get("dtype")
    channel_4d = channel_size
    op_utils.check_shape(input_shape, param_name="x")

    check_list = ("float16", "float32", "int32", "uint16")
    dtype = dtype.lower()
    op_utils.check_dtype(dtype, check_list, param_name="x")

    input_data = tvm.placeholder(input_shape, name="input_data", dtype=dtype)

    six2four = _Six2FourParam(input_shape, channel_4d)

    res = tvm.extern(
        [six2four.get_out_shape()], [input_data],
        lambda ins, outs: _intrin_factor(six2four, dtype, ins, outs),
        name="res",
        dtype=dtype)

    sch = tvm.create_schedule(res.op)
    build_list = [input_data, res]

    with build_config:
        tvm.build(sch, build_list, "cce", name=kernel_name)

def depthwise_weight_4d_2_6d(x, y, src_format, dst_format,
                             kernel_name="depthwise_weight_4d_2_6d"):
    """Operation and Schedule for depthwise_weight_4d_2_6d.

    Parameters
    ----------
    x: shape and dtype of input, the dtype supports float16, float32,
       int32, uint16.
    y: the shape and dtype of output, the dtype same as input.
    src_format: the source data_format
    dst_format: the target data_format
    kernel_name: cce kernel name, default value is
        "depthwise_weight_4d_2_6d"

    Returns
    -------
    convert HWCN to C1HWNCoC0
    """
    if src_format.lower() != "hwcn":
        raise RuntimeError("src_format must be HWCN!")

    if dst_format.lower() != "c1hwncoc0":
        raise RuntimeError("dst_format must be C1HWNCoC0!")

    input_shape = x.get("shape")
    dtype = x.get("dtype")
    op_utils.check_shape(input_shape, param_name="x")

    check_list = ("float16", "float32", "int32", "uint16")
    dtype = dtype.lower()
    op_utils.check_dtype(dtype, check_list, param_name="x")

    input_data = tvm.placeholder(input_shape, name="input_data", dtype=dtype)

    four2six = _Four2SixParam(input_shape)

    res = tvm.extern(
        [four2six.get_out_shape()], [input_data],
        lambda ins, outs: _intrin_factor(four2six, dtype, ins, outs),
        name="res",
        dtype=dtype)

    sch = tvm.create_schedule(res.op)
    build_list = [input_data, res]

    with build_config:
        tvm.build(sch, build_list, "cce", name=kernel_name)

def zn_2_hwcn(src, dst, src_format, dst_format, kernel_name='zn_2_hwcn'):
    """
    algorithm: zn_2_hwcn
    calculating: change data format from Zn to HWCN

    Parameters
    ----------
    src: dict
        contains shape and dtype information of input tensor
    dst: dict
        contains shape and dtype information of output tensor
    src_format: str
        represents the format of input tensor, only support "Zn"
    dst_format: str
        represents the format of output tensor, only support "HWCN"
    kernel_name: str
        cce kernel name, default value is "zn_2_hwcn"

    Returns
    -------
    None
    """
    _check_parameters(src, dst, src_format, dst_format, kernel_name)
    dst_shape = dst.get("shape")
    dtype = src.get("dtype")

    h_i, w_i, c_i, n_i = dst_shape
    c_0 = 16
    if dtype == "int8":
        c_0 = 32
    c_1 = _ceil_div(c_i, c_0)
    n_ni = 16
    n_no = _ceil_div(n_i, n_ni)
    shape_zn = [c_1 * h_i * w_i, n_no, n_ni, c_0]

    branch = _get_ir_branch(shape_zn, dtype)
    data = tvm.placeholder(shape_zn, dtype=dtype, name="data")
    if branch == "more_row":
        res = tvm.extern(dst_shape, [data],
                         lambda ins, outs: _more_row_ir(outs[0], ins[0], c_0),
                         name="res", dtype=dtype)
    else:
        res = tvm.extern(dst_shape, [data],
                         lambda ins, outs: _split_row_ir(outs[0], ins[0]),
                         name="res", dtype=dtype)

    tensor_list = [data, res]
    sch = tvm.create_schedule(res.op)
    with build_config:
        tvm.build(sch, tensor_list, "cce", name=kernel_name)

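# Worked example of the Zn shape derivation above (pure arithmetic, values
# hypothetical): for a float16 output of HWCN = (3, 3, 32, 64), c_0 is 16,
# so c_1 = ceil(32/16) = 2 and n_no = ceil(64/16) = 4, giving the Zn input
# shape [c_1*h*w, n_no, n_ni, c_0] = [18, 4, 16, 16].
def _example_zn_shape():
    h_i, w_i, c_i, n_i = 3, 3, 32, 64
    c_0, n_ni = 16, 16  # float16 uses c_0 = 16
    c_1 = (c_i + c_0 - 1) // c_0
    n_no = (n_i + n_ni - 1) // n_ni
    assert [c_1 * h_i * w_i, n_no, n_ni, c_0] == [18, 4, 16, 16]
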
def schedule_tile_cce(out):
    """Schedule for cce tile arithmetic operator.

    Parameters
    ----------
    out: TVM Tensor
        The computation graph description of cce tile.

    Returns
    -------
    sch: Schedule
        The computation schedule for tile.
    """
    sch = tvm.create_schedule(out.op)
    return sch

def upsample(x, y, scale=1.0, stride_h=2, stride_w=2, kernel_name="upsample"):
    """
    upsample the input tensor along the h and w axes

    Parameters
    ----------
    x : dict
        include shape, dtype and format
    stride_h : int
        the shape change on axis h
    stride_w : int
        the shape change on axis w
    scale : float
        the scale applied to the changed axes, default value is 1.0
    y : dict
        output
    kernel_name : str
        kernel name, default value is "upsample"

    Returns
    -------
    None
    """
    upsample_check(x, stride_h, stride_w, kernel_name)
    dtype = x.get("dtype")
    op_list, ins_list, tensor_dic, feature, y \
        = gen_upsample(x, dtype, scale, stride_h, stride_w)

    schedule = tvm.create_schedule(y.op)
    # skip the res buffer
    buffer_mapping(schedule, op_list[:-1])
    tilling_spilt_axis_dic \
        = tilling_spilt_axis(schedule, tensor_dic, stride_h, stride_w)
    cal_axis_dic, axis \
        = cal_axis_spilt(x, stride_h, stride_w,
                         tilling_spilt_axis_dic, tensor_dic, schedule)
    axis_list = upsample_compute(schedule, cal_axis_dic, tensor_dic)
    res_op = tensor_dic.get("res")
    ins_emit(schedule, op_list, axis_list, ins_list)

    if axis == 0:
        schedule[y].bind(cal_axis_dic.get("axis_xo"),
                         tvm.thread_axis("blockIdx.x"))
    else:
        res_out, _ = bind_multcore(axis, x, schedule, res_op)
        schedule[y].bind(res_out, tvm.thread_axis("blockIdx.x"))

    with build_config:
        tvm.build(schedule, [feature, y], "cce", name=kernel_name)

def store_to_gm(input_x, output_x, kernel_name="store_to_gm"):
    """
    copy data from l1 to ddr (l1 --> ub --> ddr)

    Parameters
    ----------
    input_x : dict
        dict of input_x, include keys(shape and dtype)
    output_x : dict
        dict of output_x, include keys(shape and dtype)
    kernel_name : str
        kernel name, default value is "store_to_gm"

    Returns
    -------
    None
    """
    input_shape = input_x.get("shape")
    input_dtype = input_x.get("dtype")

    input_tensor = tvm.placeholder(input_shape,
                                   name="input_tensor",
                                   dtype=input_dtype)

    res, res_ub = store_to_gm_compute(input_tensor, output_x,
                                      kernel_name=kernel_name)

    sch = tvm.create_schedule([res.op])
    split_axis, split_factor = _tilling_axis(input_shape, input_dtype)
    axis_outer, axis_inner = sch[res].split(res.op.axis[split_axis],
                                            factor=split_factor)
    sch[res_ub].compute_at(sch[res], axis_outer)

    sch[input_tensor].set_scope(cce.scope_cbuf_fusion)
    sch[res_ub].set_scope(cce.scope_ubuf)

    sch[res_ub].emit_insn(res_ub.op.axis[split_axis], 'dma_copy')
    sch[res].emit_insn(axis_inner, 'dma_copy')

    tensor_list = [input_tensor, res]
    with build_config:
        tvm.build(sch, tensor_list, "cce", name=kernel_name)

def flatten(x, y, kernel_name="flatten"):
    """return a copy of the tensor collapsed into one dimension.

    Parameters
    ----------
    x : dict
        shape and dtype of input.
    y : dict
        shape and dtype of output.
    kernel_name : str
        kernel name, default value is "flatten"

    Returns
    -------
    None
    """
    shape = x.get("shape")
    dtype = x.get("dtype")
    dtype_lower = dtype.lower()
    check_list = ("int8", "int16", "int32", "int64", "uint8", "uint16",
                  "uint32", "uint64", "float16", "float32")

    check_shape(shape, param_name="x")
    check_dtype(dtype_lower, check_list, param_name="x")

    size = 1
    for i, _ in enumerate(shape):
        size = size * shape[i]
    shape_new = [size]

    data = tvm.placeholder(shape_new, name="data", dtype=dtype_lower)
    data_ub = tvm.compute(shape_new, lambda *i: data(*i), name='data_ub')
    res = tvm.compute(shape_new, lambda *i: data_ub(*i), name='res')

    sch = tvm.create_schedule(res.op)
    sch[data_ub].set_scope(tbe_platform.scope_ubuf)

    sch_new = _tile_axis([sch, data_ub, res], shape_new, dtype_lower)

    with build_config:
        tvm.build(sch_new, [data, res], "cce", name=kernel_name)

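# Illustrative usage sketch (hypothetical dicts): flattening a (16, 32)
# float16 tensor into a rank-1 copy of 16 * 32 = 512 elements.
def _example_flatten():
    x = {"shape": (16, 32), "dtype": "float16"}
    y = {"shape": (512, ), "dtype": "float16"}
    flatten(x, y, kernel_name="flatten_16x32")
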
def space_to_batch_d(x, y, block_size, paddings,
                     kernel_name="space_to_batch_d"):
    """
    the main function of space_to_batch_d

    Parameters
    ----------
    x: dict, shape and datatype, datatype supports float16, float32
    y: dict, shape and datatype, datatype supports float16, float32
    block_size: must be greater than one. It indicates the block size
    paddings: (tuple, list), the padding of the input with zeros across the
        spatial dimensions as follows:
        paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
    kernel_name: cce kernel name, default value is "space_to_batch_d"

    Returns
    -------
    None
    """
    if len(paddings) == 4:
        paddings = [[paddings[0], paddings[1]], [paddings[2], paddings[3]]]

    _check_param(x, y, paddings, block_size, kernel_name)

    input_shape = x.get("shape")
    input_dtype = x.get("dtype").lower()
    block_shape = [block_size, block_size]

    data = tvm.placeholder(input_shape, name="data", dtype=input_dtype)

    res = space_to_batch_nd_d_compute(data, y, block_shape, paddings,
                                      kernel_name)

    sch = tvm.create_schedule(res.op)

    with build_config:
        tvm.build(sch, [data, res], "cce", name=kernel_name)

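# Worked example of the paddings normalization above (values hypothetical):
# the flat 4-element form is reshaped into the nested
# [[pad_top, pad_bottom], [pad_left, pad_right]] form before checking.
def _example_paddings_reshape():
    paddings = [1, 1, 2, 2]
    nested = [[paddings[0], paddings[1]], [paddings[2], paddings[3]]]
    assert nested == [[1, 1], [2, 2]]
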
def avg_pool_grad_schedule(res):
    """
    the tiling avg pool grad schedule
    """
    s = tvm.create_schedule(res.op)

    # trace the compute graph back from res to the inputs
    mad_cast = res.op.input_tensors[0]
    mad_res = mad_cast.op.input_tensors[0]
    dout_col_pad = mad_res.op.input_tensors[0]
    weight_rotated = mad_res.op.input_tensors[1]
    weight = weight_rotated.op.input_tensors[0]
    dout_col = dout_col_pad.op.input_tensors[0]
    dout_dilated = dout_col.op.input_tensors[0]
    dout_mul = dout_dilated.op.input_tensors[0]
    dout = dout_mul.op.input_tensors[0]
    dvealuemean = dout_mul.op.input_tensors[1]

    dout_ubuf = s.cache_read(dout, tbe_platform.scope_ubuf, [dout_mul])
    dvealuemean_ubuf = s.cache_read(dvealuemean, tbe_platform.scope_ubuf,
                                    [dout_mul])
    dout_mul_ubuf = s.cache_write(dout_mul, tbe_platform.scope_ubuf)
    dout_cbuf_nc1hwc0 = s.cache_write(dout_dilated, tbe_platform.scope_cbuf)
    dout_dilated_ubuf = s.cache_write(dout_cbuf_nc1hwc0,
                                      tbe_platform.scope_ubuf)
    dout_cbuf_row_major = s.cache_write(dout_col, tbe_platform.scope_cbuf)
    dout_ca = s.cache_write(dout_col_pad, tbe_platform.scope_ca)
    s[dout_mul].compute_inline()
    s[dout_dilated].compute_inline()
    s[dout_col].compute_inline()
    s[dout_col_pad].compute_inline()

    weight_cbuf = s.cache_read(weight, tbe_platform.scope_cbuf,
                               [weight_rotated])
    weight_cb = s.cache_write(weight_rotated, tbe_platform.scope_cb)
    s[weight_rotated].compute_inline()

    mad_cc = s.cache_write(mad_res, tbe_platform.scope_cc)
    mad_ubuf = s.cache_write(mad_cast, tbe_platform.scope_ubuf)
    s[mad_res].compute_inline()
    s[mad_cast].compute_inline()

    # get shape value
    dilated_pad_top = res.op.attrs['dilated_pad'][0].value
    dilated_pad_bottom = res.op.attrs['dilated_pad'][1].value
    dilated_pad_left = res.op.attrs['dilated_pad'][2].value
    dilated_pad_right = res.op.attrs['dilated_pad'][3].value
    k_height = res.op.attrs['weight_height'].value
    k_width = res.op.attrs['weight_width'].value
    block_size = dout.op.shape[len(dout.op.shape) - 1].value
    _, _, _, dout_dilated_h, dout_dilated_w, _ = dout_dilated.shape
    input_w = dout_dilated_w.value + dilated_pad_left \
        + dilated_pad_right - k_width + 1
    input_h = dout_dilated_h.value + dilated_pad_top \
        + dilated_pad_bottom - k_height + 1
    stride = dout_dilated.op.attrs["strides"][0].value
    weight_shape = [int(i.value) for i in weight.shape]
    dout_shape = [int(i.value) for i in dout.shape]
    dout_dilated_shape = [int(i.value) for i in dout_dilated.shape]

    mad_cc_axis_n, mad_cc_axis_cg, mad_cc_axis_co1, mad_cc_axis_howomad, \
        mad_cc_axis_co0 = mad_cc.op.axis
    mad_ubuf_axis_n, mad_ubuf_axis_cg, mad_ubuf_axis_co1, \
        mad_ubuf_axis_howomad, mad_ubuf_axis_co0 = mad_ubuf.op.axis
    mad_res_shape = [int(i.value) for i in mad_res.shape]
    res_block_n, res_block_cgroup, _, _, _ = mad_res_shape

    # tiling
    res_l1, tile_input_h, tile_dile_h_ub, tile_m, \
        tile_k, tile_n = avg_pool_grad_tiling(
            input_w, input_h, weight_shape, dout_shape, res, stride)

    mad_cc_Ncut_o, mad_cc_Ncut_i = s[mad_cc].split(mad_cc_axis_n, factor=1)
    mad_cc_mcut_o, mad_cc_mcut_i = s[mad_cc].split(mad_cc_axis_howomad,
                                                   factor=tile_m)
    mad_cc_kcut_o, mad_cc_kcut_i = s[mad_cc].split(mad_cc.op.reduce_axis[0],
                                                   factor=tile_k)
    mad_cc_ncut_o, mad_cc_ncut_i = s[mad_cc].split(mad_cc_axis_co1,
                                                   factor=tile_n)
    s[mad_cc].reorder(mad_cc_Ncut_o, mad_cc_axis_cg, mad_cc_ncut_o,
                      mad_cc_mcut_o, mad_cc_kcut_o, mad_cc_Ncut_i,
                      mad_cc_ncut_i, mad_cc_mcut_i, mad_cc_axis_co0,
                      mad_cc_kcut_i, mad_cc.op.reduce_axis[1])
    s[dout_ca].compute_at(s[mad_cc], mad_cc_kcut_o)
    s[weight_cb].compute_at(s[mad_cc], mad_cc_kcut_o)

    mad_ubuf_Ncut_o, mad_ubuf_Ncut_i = s[mad_ubuf].split(mad_ubuf_axis_n,
                                                         factor=1)
    mad_ubuf_mcut_o, mad_ubuf_mcut_i = s[mad_ubuf].split(
        mad_ubuf_axis_howomad, factor=tile_m)
    mad_ubuf_ncut_o, mad_ubuf_ncut_i = s[mad_ubuf].split(mad_ubuf_axis_co1,
                                                         factor=tile_n)
    s[mad_ubuf].reorder(mad_ubuf_Ncut_o, mad_ubuf_axis_cg, mad_ubuf_ncut_o,
                        mad_ubuf_mcut_o, mad_ubuf_Ncut_i, mad_ubuf_ncut_i,
                        mad_ubuf_mcut_i, mad_ubuf_axis_co0)
    s[mad_cc].compute_at(s[mad_ubuf], mad_ubuf_mcut_o)

    conv_Ncut_o, conv_Ncut_i = s[res].split(res.op.axis[0], factor=1)
    conv_hcut_o, conv_hcut_i = s[res].split(res.op.axis[3], factor=res_l1)
    conv_mcut_o, conv_mcut_i = s[res].split(conv_hcut_i, factor=tile_m)
    s[res].reorder(conv_Ncut_o, res.op.axis[1], conv_hcut_o, conv_mcut_o,
                   conv_Ncut_i, res.op.axis[2], conv_mcut_i, res.op.axis[4])
    s[mad_ubuf].buffer_align((1, 1), (1, 1), (1, 1), (1, block_size),
                             (1, block_size))
    s[mad_ubuf].compute_at(s[res], conv_mcut_o)
    s[dout_cbuf_row_major].buffer_align((1, 1), (1, 1), (input_w, input_w),
                                        (1, 1), (1, 1), (1, 1),
                                        (1, block_size))
    s[dout_cbuf_row_major].compute_at(s[res], conv_hcut_o)
    s[dout_cbuf_nc1hwc0].compute_at(s[res], conv_hcut_o)
    s[weight_cbuf].compute_at(s[res], conv_hcut_o)

    dout_dilated_w = dout_dilated_shape[4]
    ub_l1hcut_o, ub_l1hcut_i = s[dout_cbuf_nc1hwc0].split(
        dout_cbuf_nc1hwc0.op.axis[3], factor=tile_dile_h_ub)

    if stride > 1:
        dila_o_h, dila_i_h = s[dout_dilated_ubuf].split(
            dout_dilated_ubuf.op.axis[3], factor=stride)
        dila_o_w, dila_i_w = s[dout_dilated_ubuf].split(
            dout_dilated_ubuf.op.axis[4], factor=stride)
        s[dout_dilated_ubuf].reorder(dila_i_h, dila_i_w, dila_o_h, dila_o_w)
        s[dout_dilated_ubuf].unroll(dila_i_h)
        s[dout_dilated_ubuf].unroll(dila_i_w)
        s[dout_dilated_ubuf].compute_at(s[dout_cbuf_nc1hwc0], ub_l1hcut_o)
        s[dout_dilated_ubuf].emit_insn(dout_dilated_ubuf.op.axis[0],
                                       insn_cmd.DMA_PADDING)
    else:
        s[dout_dilated_ubuf].compute_inline()

    s[dout_mul_ubuf].compute_at(s[dout_cbuf_nc1hwc0], ub_l1hcut_o)
    s[dout_ubuf].compute_at(s[dout_cbuf_nc1hwc0], ub_l1hcut_o)
    s[dvealuemean_ubuf].compute_at(s[dout_cbuf_nc1hwc0], ub_l1hcut_o)
    s[dout_ubuf].emit_insn(dout_ubuf.op.axis[0], insn_cmd.DMA_COPY)
    s[dvealuemean_ubuf].emit_insn(dvealuemean_ubuf.op.axis[0],
                                  insn_cmd.DMA_COPY)
    s[dout_mul_ubuf].emit_insn(dout_mul_ubuf.op.axis[0], insn_cmd.MUL)
    s[dout_cbuf_nc1hwc0].emit_insn(ub_l1hcut_i, insn_cmd.DMA_COPY)

    # emit convolution params
    setfmatrix_dict = {
        "conv_kernel_h": res.op.attrs['weight_height'],
        "conv_kernel_w": res.op.attrs['weight_width'],
        "conv_padding_top": res.op.attrs['dilated_pad'][0],
        "conv_padding_bottom": res.op.attrs['dilated_pad'][1],
        "conv_padding_left": res.op.attrs['dilated_pad'][2],
        "conv_padding_right": res.op.attrs['dilated_pad'][3],
        "conv_stride_h": res.op.attrs['dilated_strides'][0],
        "conv_stride_w": res.op.attrs['dilated_strides'][1],
        "conv_fm_c": dout_dilated.shape[2] * dout_dilated.shape[5],
        "conv_fm_h": dout_dilated.shape[3],
        "conv_fm_w": dout_dilated.shape[4]
    }
    s[dout_cbuf_row_major].emit_insn(dout_cbuf_row_major.op.axis[1],
                                     insn_cmd.SET_FMATRIX, setfmatrix_dict)
    s[dout_ca].emit_insn(dout_ca.op.axis[1], insn_cmd.IM2COL)
    s[weight_cbuf].emit_insn(weight_cbuf.op.axis[0], insn_cmd.DMA_COPY)
    s[weight_cb].emit_insn(weight_cb.op.axis[3], insn_cmd.DMA_COPY)
    s[mad_ubuf].emit_insn(mad_ubuf_Ncut_i, insn_cmd.DMA_COPY)
    mad_dict = {
        "mad_pattern": tbe_platform.cce_params.CONV_MODE,
        "k_outer": mad_cc_kcut_o
    }
    s[mad_cc].emit_insn(mad_cc_Ncut_i, insn_cmd.MAD, mad_dict)
    s[res].emit_insn(conv_Ncut_i, insn_cmd.DMA_COPY)

    s[dout_ca].double_buffer()
    s[weight_cb].double_buffer()
    s[mad_cc].double_buffer()

    # for multi cores
    if res_block_n < 16:
        res_NNCut_o, res_NNCut_i = s[res].split(conv_Ncut_o,
                                                nparts=res_block_n)
        res_ccCut_o, res_ccCut_i = s[res].split(res.op.axis[1],
                                                nparts=res_block_cgroup)
        s[res].reorder(res_NNCut_o, res_ccCut_o, res_NNCut_i, res_ccCut_i)
        out_fused = s[res].fuse(res_NNCut_o, res_ccCut_o)
        out_fused_out, _ = s[res].split(
            out_fused, nparts=res_block_n * res_block_cgroup)
        bind_out, _ = s[res].split(out_fused_out, 1)
        blockidx = tvm.thread_axis("blockIdx.x")
        s[res].bind(bind_out, blockidx)
    else:
        block = tvm.thread_axis("blockIdx.x")
        s[res].bind(conv_Ncut_o, block)
    return s

def custom_truncatemod(shape1, shape2, dtype,
                       kernel_name="cce_tf_truncatemod", need_build=False,
                       need_print=False):
    """
    do element-wise truncatemod operation between two input tensors

    Parameters:
    ----------
    shape1 : shape of input data1
    shape2 : shape of input data2
    dtype : source data type, support float16, float32, int32
    kernel_name : cce kernel name, default value is "cce_tf_truncatemod"
    need_build : if need to build CCEC kernel, default value is False
    need_print : if need to print the ir, default value is False

    Returns
    -------
    None
    """
    max_dim = 8
    shape1_len = len(shape1)
    shape2_len = len(shape2)
    if shape1_len > max_dim or shape2_len > max_dim:
        raise RuntimeError(
            "mod_cce only support up to %d dimensions while the shape's "
            "dimensions are %d, %d" % (max_dim, shape1_len, shape2_len))

    util.check_kernel_name(kernel_name)
    util.check_shape_rule(shape1)
    util.check_shape_rule(shape2)
    util.check_shape_size(shape1, SHAPE_SIZE_LIMIT)
    util.check_shape_size(shape2, SHAPE_SIZE_LIMIT)

    check_list = ["float16", "float32", "int32"]
    device_api_map = {
        "float16": "cc_device_truncatemod_float16",
        "float32": "cc_device_truncatemod_float",
        "int32": "cc_device_truncatemod_int32"
    }

    dtype = dtype.lower()
    if dtype not in check_list:
        raise RuntimeError(
            "tf_truncatemod_cce only support %s while dtype is %s" %
            (",".join(check_list), dtype))

    shape1, shape2, shape_out = util.produce_shapes(shape1, shape2)
    util.check_shape_size(shape_out, SHAPE_SIZE_LIMIT)

    inp_dtype = dtype.lower()
    device_api = device_api_map[inp_dtype]

    # block
    block_num = "block_num"
    block_idx = "block_idx"
    # x param
    v_xndim_cnt = tvm.const(len(shape1), "int32")
    p_xshape = util.create_param_ptr(shape1, "int32", "p_xshape")
    xpad_c0 = tvm.const(0, "int32")
    data_input_x = tvm.placeholder(shape1,
                                   name="data_input_x",
                                   dtype=inp_dtype)
    # y param
    v_yndim_cnt = tvm.const(len(shape2), "int32")
    p_yshape = util.create_param_ptr(shape2, "int32", "p_yshape")
    ypad_c0 = tvm.const(0, "int32")
    data_input_y = tvm.placeholder(shape2,
                                   name="data_input_y",
                                   dtype=inp_dtype)
    # output
    v_out_ndim_cnt = tvm.const(len(shape_out), "int32")
    p_out_shape = util.create_param_ptr(shape_out, "int32", "p_out_shape")
    out_padc0 = tvm.const(0, "int32")

    output = tvm.extern(
        shape_out,
        [p_xshape, data_input_x, p_yshape, data_input_y, p_out_shape],
        lambda ins, outs: tvm.call_extern(
            "int32_t",
            device_api,
            block_num,
            block_idx,
            v_xndim_cnt,
            ins[0].access_ptr("r"),  # shape x
            xpad_c0,
            ins[1].access_ptr("r"),  # input x
            v_yndim_cnt,
            ins[2].access_ptr("r"),  # shape y
            ypad_c0,
            ins[3].access_ptr("r"),  # input y
            v_out_ndim_cnt,
            ins[4].access_ptr("r"),  # shape out
            out_padc0,
            outs[0].access_ptr("w")),
        name="output",
        dtype=inp_dtype)

    schedule = tvm.create_schedule(output.op)

    # print IR
    if need_print:
        with build_config:
            print(
                tvm.lower(schedule, [data_input_x, data_input_y, output],
                          simple_mode=True))
    # Compile to generate the cce file
    if need_build:
        with build_config:
            tvm.build(schedule, [data_input_x, data_input_y, output],
                      "cce",
                      name=kernel_name)

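# Illustrative usage sketch (hypothetical shapes and kernel name): prints
# the lowered IR of an element-wise truncatemod over two float16 inputs of
# the same shape without building.
def _example_custom_truncatemod():
    custom_truncatemod((4, 64), (4, 64), "float16",
                       kernel_name="cce_tf_truncatemod_4x64",
                       need_build=False, need_print=True)
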
def _unpack_schedule(input_place, output_shape, y, num, axis, dtype):
    """Create unpack schedule.

    Parameters
    ----------
    input_place: TVM tensor
        the tensor of input.
    output_shape: tuple or list
        the shape of output tensor.
    y: tuple or list
        the list of output tensor.
    num : int.
        the length of the dim axis.
    axis: int.
        the axis to unpack along.
    dtype: str.
        the dtype of input.

    Returns
    -------
    sch: schedule
        the created schedule.
    build_list: list
        the list of input and output tensors, tensor type is TVM tensor.
    """
    _, ele_each_block, device_core_num = _get_public_param(dtype)
    befordim, afterdim = output_shape[0], output_shape[-1]
    block_idx = tvm.thread_axis('blockIdx.x')

    # can open multi-core scene
    if befordim >= ele_each_block and afterdim < ele_each_block:
        befordim_in = ele_each_block // afterdim + 1
        befordim_out = (befordim + befordim_in - 1) // befordim_in
        while (befordim + befordim_out - 1) // befordim_out * afterdim \
                < ele_each_block:
            befordim_out -= 1
        if befordim_out >= device_core_num:
            befordim_out = device_core_num
        afterdim_in = afterdim

        gm2ub_tensor, ub2ub_tensor_list, ub2gm_tensor_list, virtual_node = \
            _unpack_compute_scalar(input_place, y, num, axis)

        res_op = []
        build_list = [input_place]
        for ub2gm_tensor in ub2gm_tensor_list:
            res_op.append(ub2gm_tensor.op)
            build_list.append(ub2gm_tensor)

        sch = tvm.create_schedule(virtual_node.op)
        sch[gm2ub_tensor].set_scope(tbe_platform.scope_ubuf)
        for tensor in ub2ub_tensor_list:
            sch[tensor].set_scope(tbe_platform.scope_ubuf)

        befordim_outer, befordim_inner = sch[virtual_node].split(
            virtual_node.op.axis[0], nparts=befordim_out)
        afterdim_outer, afterdim_inner = sch[virtual_node].split(
            virtual_node.op.axis[2], factor=afterdim_in)
        sch[virtual_node].reorder(befordim_outer, afterdim_outer,
                                  befordim_inner, afterdim_inner)
        fused_axis = sch[virtual_node].fuse(befordim_outer, afterdim_outer)
        sch[virtual_node].bind(fused_axis, block_idx)

        new_shape = ((befordim + befordim_out - 1) // befordim_out, num,
                     afterdim_in)
        split_axis, split_factor = _tiling_axis(new_shape, dtype)
        if split_axis == 0:
            axis_outer, axis_inner = sch[virtual_node].split(
                befordim_inner, factor=split_factor)
        else:
            axis_outer, axis_inner = sch[virtual_node].split(
                afterdim_inner, factor=split_factor)

        sch[gm2ub_tensor].compute_at(sch[virtual_node], axis_outer)
        sch[gm2ub_tensor].emit_insn(gm2ub_tensor.op.axis[split_axis],
                                    insn_cmd.DMA_COPY)

        for i in range(num):
            sch[ub2gm_tensor_list[i]].compute_at(sch[virtual_node],
                                                 axis_outer)
            sch[ub2ub_tensor_list[i]].compute_at(sch[virtual_node],
                                                 axis_outer)
            sch[ub2ub_tensor_list[i]].emit_insn(
                ub2ub_tensor_list[i].op.axis[split_axis], insn_cmd.DATA_MOV)
            sch[ub2gm_tensor_list[i]].emit_insn(
                ub2gm_tensor_list[i].op.axis[split_axis], insn_cmd.DMA_COPY)

        sch[virtual_node].emit_insn(axis_inner, insn_cmd.PHONY_INSN)
    else:
        gm2ub_tensor_list, ub2gm_tensor_list, virtual_node = \
            _unpack_compute_copy(input_place, y, num, axis)

        res_op = []
        build_list = [input_place]
        for ub2gm_tensor in ub2gm_tensor_list:
            res_op.append(ub2gm_tensor.op)
            build_list.append(ub2gm_tensor)

        sch = tvm.create_schedule(virtual_node.op)
        for tensor in gm2ub_tensor_list:
            sch[tensor].set_scope(tbe_platform.scope_ubuf)

        # can open multi-core scene
        if afterdim >= ele_each_block:
            if befordim >= device_core_num:
                befordim_out = device_core_num
                afterdim_in = afterdim
            elif befordim == 1:
                befordim_out = befordim
                afterdim_in = (afterdim + device_core_num -
                               1) // device_core_num
            else:
                afterdim_outer = device_core_num // befordim
                afterdim_in = (afterdim + afterdim_outer -
                               1) // afterdim_outer
                while afterdim % afterdim_in < ele_each_block:
                    afterdim_in += 1
                befordim_out = befordim

            befordim_outer, befordim_inner = sch[virtual_node].split(
                virtual_node.op.axis[0], nparts=befordim_out)
            afterdim_outer, afterdim_inner = sch[virtual_node].split(
                virtual_node.op.axis[2], factor=afterdim_in)
            sch[virtual_node].reorder(befordim_outer, afterdim_outer,
                                      befordim_inner, afterdim_inner)
            fused_axis = sch[virtual_node].fuse(befordim_outer,
                                                afterdim_outer)
            sch[virtual_node].bind(fused_axis, block_idx)

            new_shape = ((befordim + befordim_out - 1) // befordim_out, 1,
                         afterdim_in)
            split_axis, split_factor = _tiling_axis(new_shape, dtype)
            if split_axis == 0:
                axis_outer, axis_inner = sch[virtual_node].split(
                    befordim_inner, factor=split_factor)
            else:
                axis_outer, axis_inner = sch[virtual_node].split(
                    afterdim_inner, factor=split_factor)
        else:
            split_axis, split_factor = _tiling_axis(output_shape, dtype)
            axis_outer, axis_inner = sch[virtual_node].split(
                virtual_node.op.axis[split_axis], factor=split_factor)

        for i in range(num):
            storage_axis = split_axis - 1 if split_axis != 0 else 0
            sch[gm2ub_tensor_list[i]].storage_align(
                gm2ub_tensor_list[i].op.axis[storage_axis], ele_each_block,
                0)
            sch[gm2ub_tensor_list[i]].double_buffer()

            sch[gm2ub_tensor_list[i]].compute_at(sch[virtual_node],
                                                 axis_outer)
            sch[ub2gm_tensor_list[i]].compute_at(sch[virtual_node],
                                                 axis_outer)
            sch[gm2ub_tensor_list[i]].emit_insn(
                gm2ub_tensor_list[i].op.axis[split_axis], insn_cmd.DMA_COPY)
            sch[ub2gm_tensor_list[i]].emit_insn(
                ub2gm_tensor_list[i].op.axis[split_axis], insn_cmd.DMA_COPY)

        sch[virtual_node].emit_insn(axis_inner, insn_cmd.PHONY_INSN)

    return sch, build_list

def cast(input_x, output_y, dst_type, kernel_name="cast"):
    """
    cast a tensor/scalar with input shape from src data type to dst data
    type.

    restrictions of input algorithms are as follows:
    only the type groups below support tensor process:
        float16->float32
        float16->int32
        float32->float16
        float32->int32
        int8->float32
        uint8->float32
        int8->float16
        uint8->float16
        int8->int32
        uint8->int32
        int32->uint8   // number out of [0,255] can get unexpected result
        int32->int8    // number out of [-128,127] can get unexpected result
        int32->float32 // when transformed via fp16, only numbers in
                       // [-1023,1023] are guaranteed correct
        int32->float16 // only numbers in [-1023,1023] are guaranteed
                       // correct

    scalar convert support (means only support shape [1,]):
        int64->int32
        int64->float32

    Parameters
    ----------
    input_x : dict
        shape and dtype of input
    output_y: dict
        shape and dtype of output, should be same shape as input, and the
        dtype is the dst dtype need to cast to
    kernel_name : str
        cce kernel name, default value is cast

    Returns
    -------
    None
    """
    shape = util.scalar2tensor_one(input_x.get("shape"))
    src_type = input_x.get("dtype").lower()
    check_shape(shape, param_name="input_x")

    if src_type == "bool":
        src_type = "int8"
    dst_type = _cast_dsttype_conversion(dst_type)

    # fuse all dimensions into one before casting
    fuseshape = [1]
    fuseshape[0] = reduceIns(lambda x, y: x * y, shape)
    data = tvm.placeholder(fuseshape, name="data", dtype=src_type)

    if src_type == "int64":
        check_dtype(dst_type, ("float32", "int32"), param_name="dst_type")
        res = tvm.extern(
            [fuseshape], [data],
            lambda ins, outs: _kernel_ir(outs, ins, dst_type, "int64"),
            name="res",
            dtype=dst_type)
        tensor_list = [data, res]
        schedule = tvm.create_schedule(res.op)
        with build_config:
            tvm.build(schedule, tensor_list, "cce", name=kernel_name)
    else:
        with tvm.target.cce():
            res = cast_compute(data, output_y, dst_type, kernel_name)
            sch = generic.auto_schedule(res)
        config = {
            "print_ir": False,
            "name": kernel_name,
            "tensor_list": [data, res]
        }
        te.lang.cce.cce_build_code(sch, config)

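# Illustrative usage sketch (hypothetical dicts): casting float16 to
# float32. The exact encoding dst_type expects is whatever
# _cast_dsttype_conversion accepts in this codebase; a plain dtype string
# is assumed here.
def _example_cast():
    input_x = {"shape": (32, 128), "dtype": "float16"}
    output_y = {"shape": (32, 128), "dtype": "float32"}
    cast(input_x, output_y, dst_type="float32",
         kernel_name="cast_fp16_to_fp32")
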
def drop_out_do_mask(input_tensor, input_mask, input_keep_prob, output,
                     kernel_name="dropout_do_mask"):
    """
    algorithm: tf_dropout_do_mask
        scale_x = x * (1 / keep_prob)
        res = select(mask == 1, scale_x, 0)

    Parameters
    ----------
    input_tensor : dict
        shape and dtype of input_tensor, only support float16 and float32
    input_mask : dict
        shape and dtype of input_mask, 1D, dtype == uint8
        length = (size(shape_tensor) + ELEMS_BATCH_PROCESS_FP16 - 1)
            // ELEMS_BATCH_PROCESS_FP16 * ELEMS_BATCH_PROCESS_FP16 // 8
        e.g. shape_tensor=[2,5,8], shape_mask=[16], shape_res=[2,5,8]
             shape_tensor=[15,17,19], shape_mask=[608], shape_res=[15,17,19]
    input_keep_prob : dict
        shape and dtype of input_keep_prob
        shape of keep_prob, only one element and equals to (1,)
        prob scale (0.0, 1.0] NOTICE: dtype same as input_tensor
    output : dict
        shape and dtype of output
    kernel_name : str
        cce kernel name, default value is "dropout_do_mask"

    Returns
    -------
    None
    """
    shape_tensor = input_tensor.get("shape")
    shape_mask = input_mask.get("shape")
    shape_keep_prob = input_keep_prob.get("shape")
    dtype = input_tensor.get("dtype")
    if shape_keep_prob == 1:
        shape_keep_prob = (shape_keep_prob, )

    check_shape(shape_tensor, param_name="input_tensor")
    check_dtype(dtype.lower(), ["float16", "float32"],
                param_name="input_tensor")
    if len(shape_mask) != 1:
        raise RuntimeError("The length of mask shape must be 1")
    if shape_keep_prob not in [(1, ), [1, ]]:
        raise RuntimeError("Only support shape (1, ) or [1, ]")

    # functools_reduce: product of all dimensions,
    # aligned up to ELEMS_BATCH_PROCESS_FP16, then divided by 8 (bits ->
    # bytes)
    product_mask = (functools_reduce(lambda x, y: x * y, shape_tensor[:]) +
                    ELEMS_BATCH_PROCESS_FP16 - 1) // \
        ELEMS_BATCH_PROCESS_FP16 * ELEMS_BATCH_PROCESS_FP16 // 8
    if product_mask != shape_mask[0]:
        raise RuntimeError("The mask[0] should=%d, but now=%d" %
                           (product_mask, shape_mask[0]))

    data_tensor = tvm.placeholder(
        (functools_reduce(lambda x, y: x * y, shape_tensor), ),
        dtype=dtype,
        name="data_tensor")
    data_mask = tvm.placeholder(
        (functools_reduce(lambda x, y: x * y, shape_mask), ),
        dtype='uint8',
        name="data_mask")
    keep_prob_tensor = tvm.placeholder(shape_keep_prob,
                                       dtype=dtype,
                                       name="keep_prob_tensor")
    const_1 = tvm.const(1.0, dtype=dtype)

    res = tvm.extern([shape_tensor, shape_mask, shape_keep_prob],
                     [data_tensor, data_mask, keep_prob_tensor],
                     lambda ins, outs: _kernel_ir(outs, ins, const_1),
                     name="res", dtype=dtype)

    tensor_list = [data_tensor, data_mask, keep_prob_tensor, res]
    schedule = tvm.create_schedule(res.op)

    with build_config:
        tvm.build(schedule, tensor_list, "cce", name=kernel_name)

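# Worked check of the mask-length rule in the docstring above, using the
# docstring's own examples and assuming ELEMS_BATCH_PROCESS_FP16 == 128:
# 2*5*8 = 80 elements align up to 128 bits -> 16 mask bytes, and
# 15*17*19 = 4845 elements align up to 4864 bits -> 608 mask bytes.
def _example_mask_length():
    elems = 128  # assumed value of ELEMS_BATCH_PROCESS_FP16
    for shape, expected in (([2, 5, 8], 16), ([15, 17, 19], 608)):
        size = 1
        for dim in shape:
            size *= dim
        assert (size + elems - 1) // elems * elems // 8 == expected
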
def custom_round(shape, dtype, kernel_name="cce_round", need_build=False,
                 need_print=False):
    """
    doing round operations, calculating data type is float16 or float32 or
    int32

    Parameters
    ----------
    shape : shape of data
    dtype : the data type, assume src_dtype equals dst_dtype
    kernel_name : cce kernel name, default value is "cce_round"
    need_build : if need to build CCEC kernel, default value is False
    need_print : if need to print the ir, default value is False

    Returns
    -------
    None
    """
    check_list = ["float16", "float32", "int32"]
    device_api_map = {
        "float16": "cc_device_round_float16",
        "float32": "cc_device_round_float",
        "int32": "cc_device_round_int32"
    }

    max_dim = 8
    shape_len = len(shape)
    if shape_len > max_dim:
        raise RuntimeError(
            "round_cce only support up to %d dimensions while the shape's "
            "dimension is %d" % (max_dim, shape_len))

    util.check_kernel_name(kernel_name)
    util.check_shape_rule(shape)
    util.check_shape_size(shape, SHAPE_SIZE_LIMIT)

    if not (dtype.lower() in check_list):
        raise RuntimeError("round_cce only support %s while dtype is %s" %
                           (",".join(check_list), dtype))

    inp_dtype = dtype.lower()
    shape = util.shape_refine(shape)
    data_input = tvm.placeholder(shape, name="data_input", dtype=inp_dtype)
    device_api = device_api_map[inp_dtype]

    block_num = "block_num"
    block_idx = "block_idx"
    v_ndim = tvm.const(len(shape), "int32")
    padC0 = tvm.const(0, "int32")
    p_shape = util.create_param_ptr(shape, "int32", "p_shape")

    output = tvm.extern(
        shape, [data_input, p_shape],
        lambda ins, outs: tvm.call_extern(
            "int32_t",
            device_api,
            block_num,
            block_idx,
            v_ndim,
            ins[1].access_ptr("r"),  # shape
            padC0,
            ins[0].access_ptr("r"),  # input x
            outs[0].access_ptr("w")),
        name="output",
        dtype=inp_dtype)

    s = tvm.create_schedule(output.op)

    if need_print:
        with build_config:
            print(tvm.lower(s, [data_input, output], simple_mode=True))
    if need_build:
        with build_config:
            tvm.build(s, [data_input, output], "cce", name=kernel_name)

def strided_slice_d(input_x, output_x, begin, end, strides=None,
                    begin_mask=0, end_mask=0, ellipsis_mask=0,
                    new_axis_mask=0, shrink_axis_mask=0,
                    kernel_name="strided_slice_d"):
    """
    Extracts a strided slice of a tensor (generalized python array
    indexing). Roughly speaking, this op extracts a slice of size
    (end-begin)/stride from the given input tensor. Starting at the
    location specified by begin, the slice continues by adding stride to
    the index until all dimensions are not less than end. Note that a
    stride can be negative, which causes a reverse slice.

    Parameters
    ----------
    input_x : dict
        shape and dtype of input
    output_x : dict
        shape and dtype of out
    begin: list.
        represents the index of the first value to select.
    end: list.
        represents the index of the last value to select.
    strides: list or tuple.
        step length to select.
    begin_mask: int
        a bitmask where a bit i being 1 means to ignore the begin value and
        instead use the largest interval possible.
    end_mask: int
        analogous to `begin_mask`.
    ellipsis_mask: int
        a bitmask where bit `i` being 1 means the `i`th position is
        actually an ellipsis.
    new_axis_mask: int
        a bitmask where bit `i` being 1 means the `i`th specification
        creates a new shape 1 dimension.
    shrink_axis_mask: int
        a bitmask where bit `i` implies that the `i`th specification
        should shrink the dimensionality.
    kernel_name : str
        cce kernel name, default value is "strided_slice_d"

    Returns
    -------
    None
    """
    input_shape = input_x.get("shape")
    input_dtype = input_x.get("dtype").lower()
    check_list = ("float16", "float32", "int32", "uint8", "bool", "int8")
    check_dtype(input_dtype, check_list, param_name="input_x")
    check_shape(input_shape, param_name="input_x")

    begin = list(begin)
    end = list(end)

    if not _check_parameter(input_shape, begin, end, strides, ellipsis_mask,
                            new_axis_mask, shrink_axis_mask):
        raise RuntimeError("Parameter Invalid!")

    if strides is None:
        strides = _fill_list_with_ones(len(input_shape))
    else:
        strides = list(strides)

    input_tensor = tvm.placeholder(input_shape,
                                   dtype=input_dtype,
                                   name='input_tensor')
    [output, out_shape] = strided_slice_d_compute(input_tensor, output_x,
                                                  begin, end, strides,
                                                  begin_mask, end_mask,
                                                  ellipsis_mask,
                                                  new_axis_mask,
                                                  shrink_axis_mask,
                                                  kernel_name=kernel_name)

    # pylint: disable=locally-disabled,unnecessary-lambda
    out_tensor = tvm.compute(out_shape,
                             lambda *i: output(*i),
                             name='out_tensor',
                             tag='strided_slice_d|3')

    input_size = functools_reduce(lambda x, y: x * y, input_shape[0:])
    out_size = functools_reduce(lambda x, y: x * y, out_shape[0:])
    output_dtype = output_x.get("dtype").lower()
    output_shape = output_x.get("shape")

    if input_size == out_size:
        if output_dtype == "bool":
            input_x["dtype"] = "int8"
            output_x["dtype"] = "int8"
        if len(output_shape) == 0:
            output_x["shape"] = (1, )
        copy_only(input_x, output_x, kernel_name)
        return

    output_shape_one = list(output_shape)
    if ellipsis_mask == 0 and shrink_axis_mask != 0:
        for i, _ in enumerate(list(input_shape)):
            if (shrink_axis_mask & 2**i) == 2**i:
                output_shape_one.insert(i, 1)
    output_shape = tuple(output_shape_one)

    # for RL tune getting res
    fusion_manager.set_op_res(out_tensor)

    ret, sch = rl_bank.query_rl_bank([out_tensor])
    if ret and sch:
        with build_config:
            tvm.build(sch, [input_tensor, out_tensor], "cce",
                      name=kernel_name)
        return

    sch = tvm.create_schedule(out_tensor.op)
    sch[output].set_scope(tbe_platform.scope_ubuf)

    sch_input_shape = []
    for dim in output.shape:
        sch_input_shape.append(dim.value)

    check_result = _check_last_axis_situation(sch_input_shape, begin, end,
                                              strides)
    if check_result:
        _schedule_last_axis(sch, sch_input_shape, output, out_tensor,
                            input_dtype)
        with build_config:
            tvm.build(sch, [input_tensor, out_tensor], "cce",
                      name=kernel_name)
        return

    if _check_tik_branch(input_shape, output_shape, begin, end, strides):
        begin_shape = copy.deepcopy(begin)
        end_shape = copy.deepcopy(end)
        stride_shape = list(strides)
        stride_shape = copy.deepcopy(stride_shape)
        input_list = list(input_shape)

        # update begin_shape, end_shape
        begin_shape, end_shape, stride_shape = _init_parameter(
            input_list, begin_shape, end_shape, stride_shape, begin_mask,
            end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask)

        head_size = 1
        for i in range(0, (len(input_shape) - 1)):
            head_size = head_size * input_shape[i]

        if input_dtype == "float32" and input_shape[-1] == 2 and \
                begin_shape[len(begin_shape) - 1] == 0 and \
                end_shape[len(begin_shape) - 1] == 1 and head_size > 128:
            strided_slice_two_turn_one(input_x, output_x, kernel_name)
            return
        if input_list[-1] > 80 and output_shape[-1] == 80:
            res1 = strided_slice_last_dim_only(input_shape, input_dtype,
                                               output_shape, begin_shape,
                                               kernel_name)
            if res1:
                return
        if input_list[-1] >= 32 and input_list[-1] < 7500 and \
                len(output_shape) > 1 and output_shape[-1] >= 32:
            res = strided_slice_last_dim_mte(input_shape, input_dtype,
                                             output_shape, begin_shape,
                                             kernel_name)
            if res:
                return
        res = strided_slice_last_dim(input_shape, input_dtype, output_shape,
                                     begin_shape, end_shape, stride_shape,
                                     kernel_name)
        if res:
            return
        else:
            res1 = strided_slice_last_dim_one(input_shape, input_dtype,
                                              output_shape, begin_shape,
                                              kernel_name)
            if res1:
                return

    split_axis, split_factor = _tilling_axis(out_shape, dtype=input_dtype)
    core_state = _get_multicore(out_shape, input_dtype, split_axis,
                                split_factor)
    axis_outer, axis_inner = sch[out_tensor].split(
        out_tensor.op.axis[split_axis], factor=split_factor)

    if split_axis == 0:
        core_num = _get_target_core_num(out_shape[split_axis] //
                                        split_factor)
        axis_outer_outer, axis_outer_inter = sch[out_tensor].split(
            axis_outer, nparts=core_num)
    else:
        core_num = _get_target_core_num(out_shape[0])
        axis_outer_outer, axis_outer_inter = sch[out_tensor].split(
            out_tensor.op.axis[0], nparts=core_num)
        for i in range(1, split_axis):
            axis_outer_inter = sch[out_tensor].fuse(axis_outer_inter,
                                                    out_tensor.op.axis[i])
        axis_outer_inter = sch[out_tensor].fuse(axis_outer_inter, axis_outer)

    sch[output].compute_at(sch[out_tensor], axis_outer_inter)
    sch[output].emit_insn(output.op.axis[0], insn_cmd.DMA_COPY)  # gm-ub

    if len(out_shape) >= 2:
        # Convert bits to Bytes: get_bit_len returns the dtype width in
        # bits, so divide by 8
        dtype_bytes_size = tbe_platform.cce_intrin.get_bit_len(
            input_dtype) // 8
        # 32 means one block size (32 Bytes); divide by the dtype size to
        # get the number of elements that can be stored in one block
        element = 32 // dtype_bytes_size
        align_axis = _get_align_axis(out_shape)
        sch[output].storage_align(output.op.axis[align_axis], element, 0)

    if core_state:
        thread_block = tvm.thread_axis("blockIdx.x")
        sch[out_tensor].bind(axis_outer_outer, thread_block)

    sch[out_tensor].emit_insn(axis_inner, insn_cmd.DMA_COPY)  # ub-gm

    with build_config:
        tvm.build(sch, [input_tensor, out_tensor], "cce", name=kernel_name)

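# Illustrative usage sketch (hypothetical dicts and indices): extracts rows
# 1..2 of an (8, 16) float16 tensor with unit strides and all masks left at
# their default of 0.
def _example_strided_slice_d():
    input_x = {"shape": (8, 16), "dtype": "float16"}
    output_x = {"shape": (2, 16), "dtype": "float16"}
    strided_slice_d(input_x, output_x, begin=[1, 0], end=[3, 16],
                    strides=[1, 1], kernel_name="strided_slice_d_2x16")
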
def custom_pow(shape, shape_y, dtype, kernel_name="cce_tf_pow",
               need_build=False, need_print=False):
    """
    calculate x^y, calculating data type is float16 or float32 or int32;
    when x < 0, the output is a meaningless value.

    Parameters
    ----------
    shape : shape of data x
    shape_y : shape of data y
    dtype : the data type, assume src_dtype equals dst_dtype, only support
        float16, float32, int32
    kernel_name : cce kernel name, default value is "cce_tf_pow"
    need_build : if need to build CCEC kernel, default value is False
    need_print : if need to print the ir, default value is False

    Returns
    -------
    None
    """
    supported_dtypes = ["float16", "float32", "int32"]
    device_api = "cc_device_pow"

    util.check_kernel_name(kernel_name)
    util.check_shape_rule(shape)
    util.check_shape_size(shape, SHAPE_SIZE_LIMIT)

    if not dtype.lower() in supported_dtypes:
        raise RuntimeError("tf_pow_cce only support %s while dtype is %s" %
                           (",".join(supported_dtypes), dtype))

    inp_dtype = dtype.lower()
    shape = util.shape_refine(shape)
    data_lhs = tvm.placeholder(shape, name="data_lhs", dtype=inp_dtype)
    data_rhs = tvm.placeholder(shape, name="data_rhs", dtype=inp_dtype)

    v_datatype = util.get_device_api_dtype(inp_dtype)
    v_ndim = len(shape)
    block_num = "block_num"
    block_idx = "block_idx"
    pad_c0 = 0
    p_scale = util.create_param_ptr([0], inp_dtype, "p_scale")
    p_shift = util.create_param_ptr([0], inp_dtype, "p_shift")
    p_power = util.create_param_ptr([0], inp_dtype, "p_power")
    p_shape = util.create_param_ptr(shape, "int32", "p_shape")

    output = tvm.extern(
        shape, [data_lhs, data_rhs, p_scale, p_shift, p_power, p_shape],
        lambda ins, outs: tvm.call_extern(
            "int32_t",
            device_api,
            block_num,
            block_idx,
            v_datatype,
            ins[2].access_ptr("r"),  # scale
            ins[3].access_ptr("r"),  # shift
            ins[4].access_ptr("r"),  # power
            v_ndim,
            ins[5].access_ptr("r"),  # shape
            pad_c0,
            ins[0].access_ptr("r"),  # input x
            v_ndim,
            v_ndim,
            ins[5].access_ptr("r"),  # shape
            pad_c0,
            ins[1].access_ptr("r"),  # input y
            outs[0].access_ptr("w")),
        name="output",
        dtype=inp_dtype)

    schedule = tvm.create_schedule(output.op)

    if need_print:
        with build_config:
            print(
                tvm.lower(schedule, [data_lhs, data_rhs, output],
                          simple_mode=True))
    if need_build:
        with build_config:
            tvm.build(schedule, [data_lhs, data_rhs, output],
                      "cce",
                      name=kernel_name)

def custom_Upsample(shape, dtype, scale, data_format="channels_last",
                    kernel_name="cce_darknet_upsample", need_build=False,
                    need_print=False):
    """
    Parameters
    ----------
    shape: input tensor's shape
    dtype: input tensor's dtype, support: float16, float32, int32, int8,
        uint8
    scale: the upsampling factor
    data_format: "channels_last" or "channels_first"
    kernel_name : kernel name, default value is "cce_darknet_upsample"
    need_build : if need to build CCEC kernel, default value is False
    need_print : if need to print the ir, default value is False

    Returns
    -------
    None
    """
    inp_dtype = dtype.lower()
    check_list = ["float16", "float32", "int32", "int8", "uint8"]
    if inp_dtype not in check_list:
        raise RuntimeError("upsample only support %s while dtype is %s" %
                           (",".join(check_list), dtype))

    util.check_kernel_name(kernel_name)
    util.check_shape_rule(shape)
    util.check_shape_size(shape, SHAPE_SIZE_LIMIT)

    size = (scale, scale)
    shape_size = len(shape)
    if not (shape_size == 4 or shape_size == 5):
        raise RuntimeError(
            "upsample only support 4D or 5D while len(shape):%d" %
            len(shape))

    input_tensor = tvm.placeholder(shape, name="input_tensor",
                                   dtype=inp_dtype)

    res = None
    if shape_size == 5:
        # shape_size == 5, D-special format (N, C1, H, W, C0)
        output_shape = (shape[0], shape[1], shape[2] * size[0],
                        shape[3] * size[1], shape[4])
        res = tvm.compute(
            output_shape,
            lambda n, c1, h, w, c0: input_tensor[n, c1, h // size[0],
                                                 w // size[1], c0])
    else:
        if data_format == "channels_last":
            output_shape = (shape[0], shape[1] * size[0], shape[2] * size[1],
                            shape[3])
            res = tvm.compute(
                output_shape,
                lambda n, h, w, c: input_tensor[n, h // size[0],
                                                w // size[1], c])
        elif data_format == "channels_first":
            output_shape = (shape[0], shape[1], shape[2] * size[0],
                            shape[3] * size[1])
            res = tvm.compute(
                output_shape,
                lambda n, c, h, w: input_tensor[n, c, h // size[0],
                                                w // size[1]])
        else:
            raise RuntimeError(
                "upsample only support channels_last|channels_first "
                "while input type %s" % data_format)

    schedule = tvm.create_schedule(res.op)

    if need_print:
        with build_config:
            print(tvm.lower(schedule, [input_tensor, res], simple_mode=True))
    if need_build:
        with build_config:
            tvm.build(schedule, [input_tensor, res], "cce", name=kernel_name)

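# Worked example of the output-shape rule above (values hypothetical): a
# channels_last input of shape (1, 4, 4, 8) with scale = 2 upsamples to
# (1, 8, 8, 8); only the H and W axes are scaled.
def _example_upsample_shape():
    shape, scale = (1, 4, 4, 8), 2
    out = (shape[0], shape[1] * scale, shape[2] * scale, shape[3])
    assert out == (1, 8, 8, 8)
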
def custom_logical_and(shape_x, shape_y, dtype,
                       kernel_name="cce_tf_logical_and", need_build=False,
                       need_print=False):
    """
    do element-wise logical-and operation between two input tensors

    Parameters:
    ----------
    shape_x : shape of input data1
    shape_y : shape of input data2
    dtype : source data type, support "bool"
    kernel_name : cce kernel name, default value is "cce_tf_logical_and"
    need_build : if need to build CCEC kernel, default value is False
    need_print : if need to print the ir, default value is False

    Returns
    -------
    None
    """
    util.check_kernel_name(kernel_name)
    util.check_shape_rule(shape_x)
    util.check_shape_rule(shape_y)

    check_list = ["bool"]
    if not (dtype.lower() in check_list):
        raise RuntimeError(
            "logical_and_cce only support %s while dtype is %s" %
            (",".join(check_list), dtype))

    util.check_shape_size(shape_x, SHAPE_SIZE_LIMIT)
    util.check_shape_size(shape_y, SHAPE_SIZE_LIMIT)

    inp_dtype = dtype.lower()
    shape_x, shape_y, shape_max = util.produce_shapes(shape_x, shape_y)

    data1 = tvm.placeholder(shape_x, dtype=inp_dtype, name="data1")
    data2 = tvm.placeholder(shape_y, dtype=inp_dtype, name="data2")

    with tvm.target.cce():
        data1_tmp1 = te.lang.cce.broadcast(data1, shape_max)
        data1_tmp2 = te.lang.cce.broadcast(data2, shape_max)

        min_value = tvm.const(0, dtype=inp_dtype)
        res = tvm.compute(
            shape_max,
            lambda *i: tvm.select(
                tvm.all(
                    tvm.any(
                        data1_tmp1(*i) > min_value,
                        data1_tmp1(*i) < -min_value),
                    tvm.any(
                        data1_tmp2(*i) > min_value,
                        data1_tmp2(*i) < -min_value)),
                True, False),
            name="res")

        sch = tvm.create_schedule(res.op)

    if need_print:
        with build_config:
            print(tvm.lower(sch, [data1, data2, res], simple_mode=True))
    if need_build:
        with build_config:
            tvm.build(sch, [data1, data2, res], "cce", name=kernel_name)

def custom_expm1(shape, dtype, kernel_name="cce_tf_expm1", need_build=False,
                 need_print=False):
    """
    algorithm: expm1
    calculating data's expm1, y = (e ** x) - 1, dtype is float16 or float32.

    Parameters
    ----------
    shape : shape of data.
    dtype : the data type, assume src_dtype equals dst_dtype, only support
        float16, float32.
    kernel_name : cce kernel name, default value is "cce_tf_expm1".
    need_build : if need to build CCEC kernel, default value is False.
    need_print : if need to print the ir, default value is False.

    Returns
    -------
    None
    """
    # [aicpu] int32_t cc_device_exp(uint32_t blockNum, uint32_t blockIdx,
    #     int32_t dataType, const void *scale, const void *shift,
    #     const void *base, int32_t dimCnt, int32_t *shape, uint32_t padC0,
    #     const void *x, void *y);
    supported_dtypes = ["float16", "float32"]

    util.check_kernel_name(kernel_name)
    util.check_shape_rule(shape)
    util.check_shape_size(shape, SHAPE_SIZE_LIMIT)

    if not (dtype.lower() in supported_dtypes):
        raise RuntimeError("tf_expm1_cce only support %s while dtype is %s" %
                           (",".join(supported_dtypes), dtype))

    inp_dtype = dtype.lower()
    shape = util.shape_refine(shape)
    data_input = tvm.placeholder(shape, name="data_input", dtype=inp_dtype)

    # step 1. calculate y = exp ** x by aicpu api
    device_api = "DeviceExp"
    v_datatype = util.get_device_api_dtype(inp_dtype)
    v_ndim = len(shape)
    block_num = "block_num"
    block_idx = "block_idx"
    padC0 = 0
    p_scale = util.create_param_ptr([1], inp_dtype, "p_scale")
    p_shift = util.create_param_ptr([0], inp_dtype, "p_shift")
    p_base = util.create_param_ptr([-1], inp_dtype, "p_base")
    p_shape = util.create_param_ptr(shape, "int32", "p_shape")

    output_exp = tvm.extern(
        shape, [data_input, p_scale, p_shift, p_base, p_shape],
        lambda ins, outs: tvm.call_extern(
            "int32_t",
            device_api,
            block_num,
            block_idx,
            v_datatype,
            ins[1].access_ptr("r"),  # scale
            ins[2].access_ptr("r"),  # shift
            ins[3].access_ptr("r"),  # base
            v_ndim,
            ins[4].access_ptr("r"),  # shape
            padC0,
            ins[0].access_ptr("r"),  # input x
            outs[0].access_ptr("w")),
        name="output_exp",
        dtype=inp_dtype)

    offset = tvm.const((-1), dtype=inp_dtype)

    # step 2. calculate y = exp ** x - 1 by tvm
    output = tvm.compute(
        shape,
        lambda *indice: output_exp(*indice) + offset.astype(inp_dtype),
        name="output")

    # step 3. schedule the computation by tvm
    s = tvm.create_schedule(output.op)

    # step 4. build by tvm
    if need_print:
        with build_config:
            print(tvm.lower(s, [data_input, output], simple_mode=True))
    if need_build:
        with build_config:
            tvm.build(s, [data_input, output], "cce", name=kernel_name)

def max_pool_grad_grad_with_argmax(x, grad, argmax, y, ksize, strides,
                                   padding="VALID",
                                   kernel_name="cce_max_pool_grad_grad"
                                               "_with_argmax"):
    """
    Computes second-order gradients of the maxpooling function.

    Parameters
    ----------
    x: dict
        Include info about ori_input, format, ori_format, shape, ori_shape,
        dtype.
    grad: dict
        Include info about grad of ori_input, format, ori_format, shape,
        ori_shape, dtype.
    argmax: dict
        Include info about ori_input, format, ori_format, shape, ori_shape,
        dtype.
    y: dict
        Include info about result of function, format, ori_format, shape,
        ori_shape, dtype.
    ksize: list or tuple
        The size of the window for each dimension of the input tensor.
    strides: list or tuple
        The stride of the sliding window of the input tensor.
    padding: str
        The type of padding algorithm to use. Only support "VALID" or
        "SAME".
    kernel_name: str
        Cce kernel name, default value is
        "cce_max_pool_grad_grad_with_argmax".

    Returns
    -------
    None
    """
    check_shape_and_format_vailded(x, grad, argmax, y, ksize, strides,
                                   padding, kernel_name)
    shape_x = x.get("shape")
    shape_grad = grad.get("shape")
    shape_argmax = argmax.get("shape")
    shape_argmax = (shape_argmax[0], shape_argmax[1], shape_argmax[2],
                    shape_argmax[3] * shape_argmax[4], 1)

    dtype_x = x.get("dtype").lower()
    dtype_grad = grad.get("dtype").lower()
    ori_format_x = x.get("ori_format")

    x_tensor = tvm.placeholder(shape_x, dtype=dtype_x, name="input_x")

    # argmax is continuous bool, real type is uint16
    _, _, _, howo, _ = shape_argmax
    shape_argmax_boolean = (shape_argmax[0],
                            shape_argmax[1] * shape_argmax[2], howo // 16,
                            16, shape_argmax[4])
    shape_argmax_boolean = list(shape_argmax_boolean[:-1]) + list(
        [shape_argmax_boolean[-1] * 16])
    argmax_tensor = tvm.placeholder(shape_argmax_boolean,
                                    dtype="bool",
                                    name="argmax")

    grad_tensor = tvm.placeholder(shape_grad,
                                  dtype=dtype_grad,
                                  name="input_grad")

    compute_list = _max_pool_grad_grad_with_argmax_compute(
        [x_tensor, argmax_tensor, grad_tensor], x, argmax, grad, y, ksize,
        strides, padding, ori_format_x, kernel_name)

    res = compute_list[-1]
    sch = tvm.create_schedule(res.op)
    _max_pool_grad_grad_with_argmax_schedule(compute_list, [sch])

    tensor_list = [x_tensor, grad_tensor, argmax_tensor, res]
    new_config = build_config_update(build_config, "dummy_placeholder", True)
    with new_config:
        tvm.build(sch, tensor_list, "cce", name=kernel_name)

def custom_Reduction(shape, dtype, axis, op, coeff,
                     kernel_name="cce_reductionLayer", need_build=False,
                     need_print=False):
    """
    Reduce a tensor on a certain axis, and scale output with coeff

    Parameters
    ----------
    shape : shape of data
    dtype : source data type, only support float16, float32, int8, uint8
    axis : the first axis to reduce, may be negative to index from the end
        (e.g., -1 for the last axis). If axis == 0, the output Blob always
        has the empty shape (count 1), performing reduction across the
        entire input.
    op : can only be one of "SUM, ASUM (sum of abs), SUMSQ (sum of sqr),
        MEAN"
    coeff : scale for output
    kernel_name : cce kernel name, default value is "cce_reductionLayer"
    need_build : if need to build CCEC kernel, default value is False
    need_print : if need to print the ir, default value is False

    Returns
    -------
    None
    """
    util.check_kernel_name(kernel_name)
    util.check_shape_rule(shape)

    check_list = ["float16", "float32", "int8", "uint8"]
    if not dtype.lower() in check_list:
        raise RuntimeError(
            "reductionLayer_cce only support %s while dtype is %s" %
            (",".join(check_list), dtype))

    reduction_op = ("SUM", "ASUM", "SUMSQ", "MEAN")

    if not isinstance(axis, int):
        raise RuntimeError("type of axis value should be int")
    if op not in reduction_op:
        raise RuntimeError("op can only be one of SUM, ASUM, SUMSQ, MEAN")
    if not isinstance(coeff, int) and not isinstance(coeff, float):
        raise RuntimeError("coeff must be an int or a float")

    axis_origin = axis
    shape_origin = shape
    axis = util.axis_check(len(shape), axis)
    util.check_reduce_shape_rule(shape)
    shape = list(shape)
    shape1 = shape[:axis] + [
        functools_reduce(lambda x, y: x * y, shape[axis:])
    ]
    shape1, axis = util.shape_refine(shape1, axis)
    if not axis:
        axis = [0]
        shape1 = [1] + shape1

    inp_dtype = dtype.lower()
    data = tvm.placeholder(shape1, name="data_input", dtype=inp_dtype)

    with tvm.target.cce():
        res = caffe_reduction_layer_compute([data], shape_origin, dtype,
                                            axis_origin, op, coeff,
                                            kernel_name, need_build,
                                            need_print)

    if op == "MEAN" and (inp_dtype == "int8" or inp_dtype == "uint8"):
        util.check_shape_size(shape, SHAPE_SIZE_LIMIT)
        res = te.lang.cce.cast_to(res, inp_dtype)
        schedule = tvm.create_schedule(res.op)
        if need_print:
            with build_config:
                print(tvm.lower(schedule, [data, res], simple_mode=True))
        if need_build:
            with build_config:
                tvm.build(schedule, [data, res], "cce", name=kernel_name)
    else:
        with tvm.target.cce():
            sch = generic.auto_schedule(res)
        config = {
            "print_ir": need_print,
            "need_build": need_build,
            "name": kernel_name,
            "tensor_list": [data, res]
        }
        te.lang.cce.cce_build_code(sch, config)

def histogram_fixed_width_d(x, range, y, nbins, dtype="int32",
                            kernel_name='histogram_fixed_width_d'):
    """
    This operation returns a rank-1 histogram counting the number of
    entries in `values` that fell into every bin. The bins are equal width
    and determined by the arguments `value_range` and `nbins`.

    Parameters
    ----------
    x: dict
        dict info of input value, must include the keys(shape and dtype).
    range: dict
        dict info of input value_range, must include the keys(shape and
        dtype). the shape must be (2,) or [2]
    y: dict
        dict info of output
    nbins: int
        number of histogram bins.
    dtype: str
        data type for returned histogram.
    kernel_name: str
        cce kernel name, default value is "histogram_fixed_width_d"

    Returns
    -------
    None
    """
    input_shape_list = [x.get("shape"), range.get("shape")]
    input_dtype = x.get("dtype")
    dtype_input = input_dtype.lower()

    check_shape(input_shape_list[0], param_name="x")
    check_shape(input_shape_list[1], param_name="range")
    util.compare_tensor_dict_key(x, range, "dtype")

    data_shape_size = util.check_tensor_shape_size(list(input_shape_list[0]))
    data_range_shape_size = util.check_tensor_shape_size(
        list(input_shape_list[1]))

    check_dtype(dtype_input, ("float16", "float32", "int32"), param_name="x")

    if data_range_shape_size != 2:
        raise RuntimeError("the shape of range must be (2,) or [2]")

    if nbins <= 0:
        raise RuntimeError("the nbins must be > 0")

    data = tvm.placeholder([data_shape_size], dtype=dtype_input,
                           name="input_data")
    range_data = tvm.placeholder([data_range_shape_size], dtype=dtype_input,
                                 name="input_range_data")

    res = histogram_fixed_width_d_compute(data, range_data, y, nbins,
                                          kernel_name)
    sch = tvm.create_schedule(res.op)
    with build_config:
        tvm.build(sch, [data, range_data, res], "cce", name=kernel_name)
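# For reference, the equal-width binning rule a fixed-width histogram
# implements: values below range[0] land in bin 0 and values above range[1]
# in bin nbins - 1. The NumPy sketch below is an assumed reference model of
# those semantics, not the device compute defined in
# histogram_fixed_width_d_compute.
import numpy as np

def _histogram_fixed_width_ref(values, value_range, nbins):
    lo, hi = float(value_range[0]), float(value_range[1])
    width = (hi - lo) / nbins
    idx = np.floor((np.asarray(values, dtype=np.float64) - lo) / width)
    idx = np.clip(idx, 0, nbins - 1).astype(np.int64)
    return np.bincount(idx, minlength=nbins).astype(np.int32)

print(_histogram_fixed_width_ref([-1.0, 0.0, 1.5, 4.5, 100.0],
                                 (0.0, 5.0), 5))  # -> [2 1 0 0 2]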
def custom_batch_matmul(shape_x, shape_y, dtype, trans_a=False,
                        trans_b=False, kernel_name="cce_tf_batch_matmul",
                        need_build=False, need_print=False):
    """
    Multiplies slices of two tensors in batches (each slice can be viewed
    as an element of a batch), and the output is of the same batch size.

    Each of the individual slices can optionally be transposed before
    multiplication by setting the trans_a or trans_b flag to True, which
    are by default False. The input tensors are 2-D or higher with the
    shape [..., r_x, c_x] and [..., r_y, c_y]. The output tensor is 2-D or
    higher with the shape [..., r_o, c_o], where
    r_o = c_x if trans_a else r_x
    c_o = r_y if trans_b else c_y

    Parameters
    ----------
    shape_x : shape of the first tensor x with rank > 1

    shape_y : shape of the second tensor y with the same type and rank as x

    dtype : the data type, support int8, uint8, float16, float32, int32

    kernel_name : cce kernel name, default value is "cce_tf_batch_matmul"

    trans_a : if True, shape_x is transposed before multiplication

    trans_b : if True, shape_y is transposed before multiplication

    need_build : if need to build CCEC kernel, default value is False

    need_print : if need to print the ir, default value is False

    Returns
    -------
    None
    """
    util.check_kernel_name(kernel_name)
    util.check_shape_rule(shape_x)
    util.check_shape_rule(shape_y)
    util.check_shape_size(shape_x, SHAPE_SIZE_LIMIT)
    util.check_shape_size(shape_y, SHAPE_SIZE_LIMIT)
    data_dtype = dtype.lower()
    check_list = ["int8", "uint8", "float16", "float32", "int32"]
    if data_dtype not in check_list:
        raise RuntimeError(
            "batch_matmul_cce only supports %s while dtype is %s"
            % (",".join(check_list), dtype))

    def transpose_tensor(shape, size):
        """Transpose the last two dimensions of a shape, e.g., the shape
        [..., r_x, c_x] is transposed to [..., c_x, r_x].

        Parameters
        ----------
        shape : shape of a tensor

        size : length of the shape

        Returns
        -------
        shape_ori : the transposed shape
        """
        shape_ori = ()
        if size == 1:
            shape_ori = shape_ori + shape
        elif size == 2:
            shape_ori = shape_ori + (shape[1], ) + (shape[0], )
        else:
            shape_ori = shape_ori + shape[:(size - 2)] + (
                shape[size - 1], ) + (shape[size - 2], )
        return shape_ori

    def check_matmul(shape_x, shape_y):
        """Check whether batch_matmul is supported or not.

        Parameters
        ----------
        shape_x : shape of the first tensor x

        shape_y : shape of the second tensor y with the same type and rank
            as x

        Returns
        -------
        None
        """
        len_x = len(shape_x)
        len_y = len(shape_y)
        if (len_x < 2) or (len_y < 2):
            raise RuntimeError("Only tensors of rank>=2 are supported!")
        if shape_x[len_x - 1] != shape_y[len_y - 2]:
            raise RuntimeError(
                "Invalid matrix multiplication for the inner 2 dimensions!")
        if (len_x == len_y) and (len_x > 2):
            for i in range(len_x - 2):
                if shape_x[i] != shape_y[i]:
                    raise RuntimeError("Outer dimensions do not match!")
            return
        elif (len_x == len_y) and (len_x == 2):
            return
        else:
            raise RuntimeError(
                "The input tensors are not with the same rank!")

    def _compute(output_shape, x, y, K, trans_a, trans_b, *indices):
        """matmul computation in terms of the output shape and the
        transposes

        Parameters
        ----------
        output_shape : the final output shape, e.g., shape_x = (2, 6),
            shape_y = (8, 2), trans_a = True, trans_b = True, then
            output_shape = (6, 8).

        x : the first input tensor according to shape_x.

        y : the second input tensor according to shape_y.

        K : the length of the axis for sum, in the above example, K = 2.

        trans_a : if True, x needs to be transposed.

        trans_b : if True, y needs to be transposed.

        *indices : the output shape space for tvm.compute.

        Returns
        -------
        tvm.Tensor
        """
        n_len = len(output_shape)
        k = tvm.reduce_axis((0, K), 'k')
        if trans_a is True and trans_b is False:
            # For example, A: (6, 7, 8), B: (6, 7, 9), so the length is n = 3
            # C = A' * B : (6, 8, 9), A' means the transpose of A
            # indices means the space of (6, 8, 9), k = 7
            # x_indices = indices[:1]+(7, )+indices[1:2] = (6, 7, 8)
            # y_indices = indices[:1]+(7, )+indices[2:] = (6, 7, 9)
            x_indices = indices[:(n_len - 2)] + (k, ) + \
                indices[(n_len - 2):(n_len - 1)]
            y_indices = indices[:(n_len - 2)] + (k, ) + indices[(n_len - 1):]
            return tvm.sum(x(*x_indices) * y(*y_indices), axis=k)
        elif not trans_a and trans_b:
            # For example, A: (6, 7, 8), B: (6, 9, 8), C = A * B' : (6, 7, 9)
            # indices means the space of (6, 7, 9), n=3, k = 8
            # x_indices = indices[:2]+(8, ) = (6, 7, 8)
            # y_indices = indices[:1]+indices[2:]+(8, ) = (6, 9, 8)
            x_indices = indices[:(n_len - 1)] + (k, )
            y_indices = indices[:(n_len - 2)] + indices[(n_len - 1):] + (k, )
            return tvm.sum(x(*x_indices) * y(*y_indices), axis=k)
        elif trans_a and trans_b:
            # For example, A: (6, 8, 10), B: (6, 12, 8), C = A' * B' :
            # (6, 10, 12)
            # indices means the space of (6, 10, 12), n=3, k = 8
            # x_indices = indices[:1]+(8, )+indices[1:2] = (6, 8, 10)
            # y_indices = indices[:1]+indices[2:]+(8, ) = (6, 12, 8)
            x_indices = indices[:(n_len - 2)] + (k, ) + \
                indices[(n_len - 2):(n_len - 1)]
            y_indices = indices[:(n_len - 2)] + indices[(n_len - 1):] + (k, )
            return tvm.sum(x(*x_indices) * y(*y_indices), axis=k)
        else:
            # For example, A: (6, 15, 16), B: (6, 16, 18), C = A * B :
            # (6, 15, 18)
            # indices means the space of (6, 15, 18), n=3, k = 16
            # x_indices = indices[:2]+(16, ) = (6, 15, 16)
            # y_indices = indices[:1]+(16, )+indices[2:] = (6, 16, 18)
            x_indices = indices[:(n_len - 1)] + (k, )
            y_indices = indices[:(n_len - 2)] + (k, ) + indices[(n_len - 1):]
            return tvm.sum(x(*x_indices) * y(*y_indices), axis=k)

    def check_supported_shape_size(shape_x, shape_y, limit, trans_a,
                                   trans_b):
        """Check whether the shape size is within the limit for the operator.

        Parameters
        ----------
        shape_x : shape of the first tensor x

        shape_y : shape of the second tensor y

        limit : limit of the product of all dimensions

        trans_a : if True, shape_x is transposed

        trans_b : if True, shape_y is transposed

        Returns
        -------
        None
        """
        # This check rejects shapes that are too large and would cause a
        # timeout.
        # shape_x = (a,b,c,d,e,k), shape_y = (a,b,c,d,k,f)
        # t_1 : time consumed by each addition operation
        # t_2 : time consumed by each multiplication operation
        # t_all : time consumed by a complete calculation
        # t_all is approximately equal to (a*b*c*d)*(e*k*f)*(t_1+t_2)
        # As (t_1 + t_2) is a constant, t_all is proportional to
        # (a * b * c * d * e * k * f)
        len_x = len(shape_x)
        len_y = len(shape_y)
        if (len_x < 2) or (len_y < 2):
            raise RuntimeError("Only tensors of rank>=2 are supported!")

        shape_x = list(shape_x)
        shape_y = list(shape_y)
        tmp_shape_x = shape_x[:]
        if trans_a:
            tmp_shape_x = shape_x[:-2] + [shape_x[-1], shape_x[-2]]
        tmp_shape_y = shape_y[:]
        if trans_b:
            tmp_shape_y = shape_y[:-2] + [shape_y[-1], shape_y[-2]]
        union_shape = tmp_shape_x + [tmp_shape_y[-1]]
        union_size = reduce(lambda i, j: i * j, union_shape)
        if union_size > limit:
            raise RuntimeError("the shape is too large to calculate")

    if data_dtype in ["float16", "float32", "int32"]:
        type_shape_map = {
            'float16': SHAPE_SIZE_FP16_LIMIT,
            'float32': SHAPE_SIZE_FP32_LIMIT,
            'int32': SHAPE_SIZE_INT32_LIMIT
        }
        check_supported_shape_size(shape_x, shape_y,
                                   type_shape_map[data_dtype], trans_a,
                                   trans_b)

    x_size = len(shape_x)
    y_size = len(shape_y)
    shape_a = shape_x
    shape_b = shape_y
    if trans_a is True:
        shape_x = transpose_tensor(shape_x, x_size)
    if trans_b is True:
        shape_y = transpose_tensor(shape_y, y_size)

    check_matmul(shape_x, shape_y)
    last_axis = shape_x[x_size - 1]

    x_temp = tvm.placeholder(shape_a, name="input_1", dtype=data_dtype)
    y_temp = tvm.placeholder(shape_b, name="input_2", dtype=data_dtype)

    # output shape
    output_shape = ()
    for i in range(x_size - 1):
        output_shape = output_shape + (shape_x[i], )
    output_shape = output_shape + (shape_y[x_size - 1], )

    result = tvm.compute(
        output_shape,
        lambda *indices: _compute(output_shape, x_temp, y_temp, last_axis,
                                  trans_a, trans_b, *indices),
        name="result")
    schedule = tvm.create_schedule(result.op)

    if need_print:
        with build_config:
            print(
                tvm.lower(schedule, [x_temp, y_temp, result],
                          simple_mode=True))
    if need_build:
        with build_config:
            tvm.build(schedule, [x_temp, y_temp, result], "cce",
                      name=kernel_name)
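# The index splicing in _compute above can be cross-checked against NumPy:
# for each trans_a/trans_b combination, the spliced x/y indices should agree
# with transposing the last two axes and calling matmul. A hedged, purely
# illustrative reference (helper name invented), not part of the kernel:
import numpy as np

def _batch_matmul_ref(a, b, trans_a, trans_b):
    if trans_a:
        a = np.swapaxes(a, -1, -2)
    if trans_b:
        b = np.swapaxes(b, -1, -2)
    return np.matmul(a, b)

a = np.random.rand(6, 7, 8)
b = np.random.rand(6, 9, 7)
# A: (6, 7, 8) with trans_a=True -> (6, 8, 7); B: (6, 9, 7) with
# trans_b=True -> (6, 7, 9); result: (6, 8, 9)
print(_batch_matmul_ref(a, b, True, True).shape)  # (6, 8, 9)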
def custom_Exp(shape, dtype, gamma, alpha, beta, kernel_name="cce_exp",
               need_build=False, need_print=False):
    """
    calculate gamma ** (alpha * data + beta), computed as
    exp(log(gamma) * alpha * data) * (gamma ** beta)

    Parameters
    ----------
    shape : shape of data

    dtype : the data type, assume src_dtype equals dst_dtype, only support
        float16, float32

    gamma : the data type must be same with dtype parameter
        base in gamma ** (alpha * data + beta)

    alpha : the data type must be same with dtype parameter
        scale in gamma ** (alpha * data + beta)

    beta : the data type must be same with dtype parameter
        shift in gamma ** (alpha * data + beta)

    kernel_name : cce kernel name, default value is "cce_exp"

    need_build : if need to build CCEC kernel, default value is False

    need_print : if need to print the ir, default value is False

    Returns
    -------
    None
    """
    supported_dtypes = ["float16", "float32"]
    device_api = "DeviceExp"

    util.check_kernel_name(kernel_name)
    util.check_shape_rule(shape)
    util.check_shape_size(shape, SHAPE_SIZE_LIMIT)

    if not dtype.lower() in supported_dtypes:
        raise RuntimeError(
            "caffe_exp_layer_cce only support %s while dtype is %s"
            % (",".join(supported_dtypes), dtype))

    if gamma != -1 and gamma <= 0:
        # the device api cc_device_exp_c handles gamma == -1 as e
        raise ValueError(
            "please ensure gamma is greater than 0, where gamma = %s"
            % str(gamma))

    inp_dtype = dtype.lower()
    shape = util.shape_refine(shape)
    data_input = tvm.placeholder(shape, name="data_input", dtype=inp_dtype)

    v_datatype = util.get_device_api_dtype(inp_dtype)
    v_ndim = len(shape)
    block_num = "block_num"
    block_idx = "block_idx"
    pad_c0 = 0
    p_scale = util.create_param_ptr([alpha], inp_dtype, "p_scale")
    p_shift = util.create_param_ptr([beta], inp_dtype, "p_shift")
    p_base = util.create_param_ptr([gamma], inp_dtype, "p_base")
    p_shape = util.create_param_ptr(shape, "int32", "p_shape")

    # scale --> alpha, shift --> beta, base --> gamma
    output = tvm.extern(
        shape, [data_input, p_scale, p_shift, p_base, p_shape],
        lambda ins, outs: tvm.call_extern(
            "int32_t", device_api,
            block_num,
            block_idx,
            v_datatype,
            ins[1].access_ptr("r"),  # scale
            ins[2].access_ptr("r"),  # shift
            ins[3].access_ptr("r"),  # base
            v_ndim,
            ins[4].access_ptr("r"),  # shape
            pad_c0,
            ins[0].access_ptr("r"),  # input x
            outs[0].access_ptr("w")),
        name="output", dtype=inp_dtype)

    schedule = tvm.create_schedule(output.op)

    if need_print:
        with build_config:
            print(tvm.lower(schedule, [data_input, output],
                            simple_mode=True))
    if need_build:
        with build_config:
            tvm.build(schedule, [data_input, output], "cce",
                      name=kernel_name)
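# A quick numeric sanity check of the identity the docstring states,
# gamma ** (alpha * x + beta) == exp(log(gamma) * alpha * x) * gamma ** beta,
# which is what lets a generic device exp call express the Caffe Exp layer.
# Hedged illustration only; the DeviceExp API itself is opaque here.
import math

def _exp_layer_ref(x, gamma, alpha, beta):
    return math.exp(math.log(gamma) * alpha * x) * (gamma ** beta)

x, gamma, alpha, beta = 0.7, 2.0, 1.5, 0.25
direct = gamma ** (alpha * x + beta)
print(abs(direct - _exp_layer_ref(x, gamma, alpha, beta)) < 1e-12)  # True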
def SpatialTransformer(input_shape, out_shape, dtype="float32",
                       kernel_name="SpatialTransformer", need_build=True,
                       need_print=False):
    """Spatial Transformer Layer

    Implements a spatial transformer layer as described in [1]_.
    Based on [2]_.

    Parameters
    ----------
    input_shape : the shape of the input tensor
        [num_batch, height, width, num_channels]

    out_shape : list of two ints
        the height and width of the output tensor [out_height, out_width]

    dtype : data type

    kernel_name : kernel name, default value is "SpatialTransformer"

    need_build : if need to build CCEC kernel, default value is True

    need_print : if need to print the ir, default value is False

    Returns
    -------
    None

    References
    ----------
    .. [1] Spatial Transformer Networks
           Max Jaderberg, Karen Simonyan, Andrew Zisserman,
           Koray Kavukcuoglu
    .. [2] https://github.com/tensorflow/models/tree/master/research/transformer
    """
    def _meshgrid(height, width):
        y0 = tvm.compute((height, ),
                         lambda i: -1 + i * 2.0 / (height - 1), name='y0')
        x0 = tvm.compute((width, ),
                         lambda i: -1 + i * 2.0 / (width - 1), name='x0')
        y = tvm.compute((height * width, ), lambda i: y0[i // width],
                        name='y')
        x = tvm.compute((height * width, ), lambda i: x0[i % width],
                        name='x')

        y = topi.reshape(y, (1, height * width))
        x = topi.reshape(x, (1, height * width))
        ones = tvm.compute((1, height * width), lambda i, j: 1, name='ones')

        # stack x, y and ones into a (3, height * width) grid; since
        # topi.concatenate((x, y, ones), 0) can not be used here, a Lagrange
        # polynomial in i selects x (i == 0), y (i == 1) or ones (i == 2)
        grid = tvm.compute(
            (3, height * width),
            lambda i, j: 0.5 * (i - 1) * (i - 2) * x[0, j]
            + i * (2 - i) * y[0, j] + 0.5 * i * (i - 1) * ones[0, j],
            name='grid')
        return grid

    def _interpolate(im, im_shape, x, y, out_size, dtype):
        num_batch = im_shape[0]
        height = im_shape[1]
        width = im_shape[2]
        channels = im_shape[3]

        out_height = out_size[0]
        out_width = out_size[1]
        max_y = int(im_shape[1] - 1)
        max_x = int(im_shape[2] - 1)

        # [-1, 1] -> [0, width - 1] and [0, height - 1]
        x = topi.multiply(topi.add(x, tvm.const(1, dtype=dtype)),
                          width / tvm.const(2, dtype=dtype))
        y = topi.multiply(topi.add(y, tvm.const(1, dtype=dtype)),
                          height / tvm.const(2, dtype=dtype))

        # do sampling
        dim3 = out_height * out_width * num_batch
        x0 = topi.cast(topi.floor(x), 'int32')
        y0 = topi.cast(topi.floor(y), 'int32')
        x1 = topi.add(x0, tvm.const(1, dtype="int32"))
        y1 = topi.add(y0, tvm.const(1, dtype="int32"))

        x0 = topi.clip(x0, 0, max_x)
        x1 = topi.clip(x1, 0, max_x)
        y0 = topi.clip(y0, 0, max_y)
        y1 = topi.clip(y1, 0, max_y)

        dim2 = width
        dim1 = width * height

        base = tvm.compute(
            (dim3, ),
            lambda i: (i // (out_height * out_width)) * width * height,
            name='base')
        base_y0 = topi.add(base, topi.multiply(y0, dim2))
        base_y1 = topi.add(base, topi.multiply(y1, dim2))

        idx_a = topi.add(base_y0, x0)
        idx_b = topi.add(base_y1, x0)
        idx_c = topi.add(base_y0, x1)
        idx_d = topi.add(base_y1, x1)

        im_flat = topi.reshape(im, (num_batch * height * width, channels))
        im_flat = topi.cast(im_flat, dtype)
        Ia = tvm.compute((dim3, channels),
                         lambda i, j: im_flat[idx_a[i], j], name='Ia')
        Ib = tvm.compute((dim3, channels),
                         lambda i, j: im_flat[idx_b[i], j], name='Ib')
        Ic = tvm.compute((dim3, channels),
                         lambda i, j: im_flat[idx_c[i], j], name='Ic')
        Id = tvm.compute((dim3, channels),
                         lambda i, j: im_flat[idx_d[i], j], name='Id')

        x0_f = topi.cast(x0, dtype)
        x1_f = topi.cast(x1, dtype)
        y0_f = topi.cast(y0, dtype)
        y1_f = topi.cast(y1, dtype)

        # bilinear weights: each weight is the area of the rectangle spanned
        # by the sample point and the corner opposite the sampled pixel
        wa = topi.expand_dims(topi.multiply(topi.subtract(x1_f, x),
                                            topi.subtract(y1_f, y)), 1)
        wb = topi.expand_dims(topi.multiply(topi.subtract(x1_f, x),
                                            topi.subtract(y, y0_f)), 1)
        wc = topi.expand_dims(topi.multiply(topi.subtract(x, x0_f),
                                            topi.subtract(y1_f, y)), 1)
        wd = topi.expand_dims(topi.multiply(topi.subtract(x, x0_f),
                                            topi.subtract(y, y0_f)), 1)

        output = topi.add(
            topi.add(
                topi.add(topi.multiply(wa, Ia), topi.multiply(wb, Ib)),
                topi.multiply(wc, Ic)), topi.multiply(wd, Id))
        return output

    def _transform(theta, input_dim, out_size, input_shape, dtype):
        num_batch = input_shape[0]
        height = input_shape[1]
        width = input_shape[2]
        num_channels = input_shape[3]

        theta = topi.reshape(theta, (num_batch, 2, 3))
        theta = topi.cast(theta, dtype)

        out_height = out_size[0]
        out_width = out_size[1]
        grid = _meshgrid(out_height, out_width)
        # tile the (3, out_height * out_width) grid across the batch
        # dimension; a plain reshape would only be valid for num_batch == 1
        grid = topi.reshape(grid, (1, 3, out_height * out_width))
        grid = topi.broadcast_to(grid,
                                 (num_batch, 3, out_height * out_width))
        grid = topi.cast(grid, dtype=dtype)

        k = tvm.reduce_axis((0, 3), 'k')
        T_g = tvm.compute(
            (num_batch, 2, out_height * out_width),
            lambda b, y, x: tvm.sum(theta[b, y, k] * grid[b, k, x], axis=k),
            name='T_g')
        x_s = tvm.compute((num_batch, 1, out_height * out_width),
                          lambda i, j, k: T_g[i, 0, k], name='x_s')
        y_s = tvm.compute((num_batch, 1, out_height * out_width),
                          lambda i, j, k: T_g[i, 1, k], name='y_s')
        x_s_flat = topi.reshape(x_s, (num_batch * out_height * out_width, ))
        y_s_flat = topi.reshape(y_s, (num_batch * out_height * out_width, ))

        input_transformed = _interpolate(input_dim, input_shape, x_s_flat,
                                         y_s_flat, out_size, dtype)
        output = topi.reshape(
            input_transformed,
            [num_batch, out_height, out_width, num_channels])
        return output

    num_batch = input_shape[0]
    input_height = input_shape[1]
    input_width = input_shape[2]
    channel = input_shape[3]

    U = tvm.placeholder((num_batch, input_height, input_width, channel),
                        name="U", dtype=dtype)
    theta = tvm.placeholder((num_batch, 6, 1, 1), name="theta", dtype=dtype)

    output = _transform(theta, U, out_shape, input_shape, dtype)
    s = tvm.create_schedule(output.op)

    if need_print:
        with build_config:
            print(tvm.lower(s, [U, theta, output], simple_mode=True))
    if need_build:
        with build_config:
            tvm.build(s, [U, theta, output], "cce", name=kernel_name)
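# The bilinear sampling in _interpolate can be checked against a small NumPy
# reference: the four weights wa/wb/wc/wd are the areas of the rectangles
# opposite each corner pixel, with the same clip-then-weight convention as
# the kernel. A hedged sketch for illustration only (helper name invented):
import numpy as np

def _bilinear_ref(im, xf, yf):
    """Sample im[y, x] at fractional (xf, yf) with corner clipping."""
    h, w = im.shape
    x0, y0 = int(np.floor(xf)), int(np.floor(yf))
    x1, y1 = min(x0 + 1, w - 1), min(y0 + 1, h - 1)
    x0, y0 = max(x0, 0), max(y0, 0)
    wa = (x1 - xf) * (y1 - yf)   # weight of (y0, x0)
    wb = (x1 - xf) * (yf - y0)   # weight of (y1, x0)
    wc = (xf - x0) * (y1 - yf)   # weight of (y0, x1)
    wd = (xf - x0) * (yf - y0)   # weight of (y1, x1)
    return (wa * im[y0, x0] + wb * im[y1, x0]
            + wc * im[y0, x1] + wd * im[y1, x1])

im = np.arange(16, dtype=np.float64).reshape(4, 4)
print(_bilinear_ref(im, 1.5, 2.5))  # 11.5: mean of the 2x2 block at (2, 1)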