def get_inx(x, image_width, target_width, coordinate_transformation_mode, start_x=0, end_x=-1):
    """Infer input x from output x with various coordinate transformation methods"""
    scale_x = te.div(image_width.astype("float"), target_width.astype("float"))
    if coordinate_transformation_mode == "half_pixel":
        in_x = (x + 0.5) * scale_x - 0.5
    elif coordinate_transformation_mode == "align_corners":
        in_x = (image_width - 1).astype("float") / (target_width - 1) * x
    elif coordinate_transformation_mode == "asymmetric":
        in_x = scale_x * x
    elif coordinate_transformation_mode == "pytorch_half_pixel":
        in_x = te.if_then_else(target_width > 1, (x + 0.5) * scale_x - 0.5, 0.0)
    elif coordinate_transformation_mode == "tf_half_pixel_for_nn":
        in_x = (x + 0.5) * scale_x
    elif coordinate_transformation_mode == "tf_crop_and_resize":
        in_x = te.if_then_else(
            target_width > 1,
            start_x * (image_width - 1)
            + x * (end_x - start_x) * (image_width - 1).astype("float") / (target_width - 1),
            0.5 * (start_x + end_x) * (image_width - 1),
        )
    else:
        raise ValueError(
            "Unsupported coordinate_transformation_mode: {}".format(
                coordinate_transformation_mode
            )
        )
    return in_x
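# --- Illustrative sketch (not part of the TVM source): how the coordinate
# transformation modes above differ when upsampling a width-4 image to width 8.
# Plain NumPy is used so the mapping can be inspected directly.
import numpy as np

image_width, target_width = 4, 8
x = np.arange(target_width, dtype="float32")
scale_x = image_width / target_width

half_pixel = (x + 0.5) * scale_x - 0.5                      # [-0.25, 0.25, ..., 3.25]
align_corners = (image_width - 1) / (target_width - 1) * x  # [0.0, 0.43, ..., 3.0]
asymmetric = scale_x * x                                     # [0.0, 0.5, ..., 3.5]

print(half_pixel)
print(align_corners)
print(asymmetric)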
def my_bilinear_sample_nchw(bottom_data, n, c, h, w, h_size, w_size):
    h_low = te.floor(h)
    w_low = te.floor(w)
    h_high = h_low + 1
    w_high = w_low + 1
    lh = h - h_low
    lw = w - w_low
    hh = 1 - lh
    hw = 1 - lw
    zero = tvm.tir.const(0.0, bottom_data.dtype)
    # Corner values; corners that fall outside the image contribute zero.
    v1 = te.if_then_else(
        tvm.tir.all(h_low >= 0, w_low >= 0), bottom_data[n, c, h_low, w_low], zero
    )
    v2 = te.if_then_else(
        tvm.tir.all(h_low >= 0, w_high <= w_size - 1), bottom_data[n, c, h_low, w_high], zero
    )
    v3 = te.if_then_else(
        tvm.tir.all(h_high <= h_size - 1, w_low >= 0), bottom_data[n, c, h_high, w_low], zero
    )
    v4 = te.if_then_else(
        tvm.tir.all(h_high <= h_size - 1, w_high <= w_size - 1),
        bottom_data[n, c, h_high, w_high],
        zero,
    )
    w1 = hh * hw
    w2 = hh * lw
    w3 = lh * hw
    w4 = lh * lw
    val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
    return val
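# --- Plain-NumPy reference (illustrative only) of the same bilinear sampling rule:
# out-of-range corners contribute zero, mirroring the if_then_else guards above.
import numpy as np

def bilinear_sample_nchw_ref(bottom, n, c, h, w):
    _, _, h_size, w_size = bottom.shape
    h_low, w_low = int(np.floor(h)), int(np.floor(w))
    h_high, w_high = h_low + 1, w_low + 1
    lh, lw = h - h_low, w - w_low
    hh, hw = 1 - lh, 1 - lw

    def at(y, x):
        return bottom[n, c, y, x] if 0 <= y < h_size and 0 <= x < w_size else 0.0

    return (hh * hw * at(h_low, w_low) + hh * lw * at(h_low, w_high)
            + lh * hw * at(h_high, w_low) + lh * lw * at(h_high, w_high))

# Example: sample halfway between the four pixels of a 1x1x2x2 image.
img = np.arange(4, dtype="float32").reshape(1, 1, 2, 2)
print(bilinear_sample_nchw_ref(img, 0, 0, 0.5, 0.5))  # 1.5, the mean of 0, 1, 2, 3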
def get_iny_inx(
    y, x, image_height, image_width, target_height, target_width, coordinate_transformation_mode
):
    """Infer input x,y from output x,y with various coordinate transformation methods"""
    scale_y = te.div(image_height.astype("float"), target_height.astype("float"))
    scale_x = te.div(image_width.astype("float"), target_width.astype("float"))
    if coordinate_transformation_mode == "half_pixel":
        in_y = (y + 0.5) * scale_y - 0.5
        in_x = (x + 0.5) * scale_x - 0.5
    elif coordinate_transformation_mode == "align_corners":
        in_y = (image_height - 1).astype("float") / (target_height - 1) * y
        in_x = (image_width - 1).astype("float") / (target_width - 1) * x
    elif coordinate_transformation_mode == "asymmetric":
        in_y = scale_y * y
        in_x = scale_x * x
    elif coordinate_transformation_mode == "pytorch_half_pixel":
        in_y = te.if_then_else(target_height > 1, (y + 0.5) * scale_y - 0.5, 0.0)
        in_x = te.if_then_else(target_width > 1, (x + 0.5) * scale_x - 0.5, 0.0)
    elif coordinate_transformation_mode == "tf_half_pixel_for_nn":
        in_y = (y + 0.5) * scale_y
        in_x = (x + 0.5) * scale_x
    else:
        raise ValueError(
            "Unsupported coordinate_transformation_mode: {}".format(
                coordinate_transformation_mode
            )
        )
    return in_y, in_x
def within_index(b, e, s, i):
    """Return a boolean value that indicates if i is within the given index.

    Parameters
    ----------
    b : Expr
        beginning of the index
    e : Expr
        end of the index
    s : Expr
        strides of index
    i : Expr
        array position

    Returns
    -------
    selected: Expr
        bool expression that is True if the array position would be selected
        by the index and False otherwise
    """
    bc = tvm.tir.Select(s < 0, i <= e, i < b)
    ec = tvm.tir.Select(s < 0, i > b, i >= e)
    ss = te.if_then_else(s < 0, ((i - e) + (e % te.abs(s)) + 1) % te.abs(s), (i - b) % s)
    return tvm.tir.Select(tvm.tir.Or(bc, ec), tvm.tir.const(False), ss.equal(0))
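# --- Hypothetical plain-Python mirror of within_index, shown only to make the
# selection rule concrete; the names and values below are illustrative, not from
# the TVM source.
def within_index_py(b, e, s, i):
    if s < 0:
        out_of_range = i <= e or i > b
        on_stride = ((i - e) + (e % abs(s)) + 1) % abs(s) == 0
    else:
        out_of_range = i < b or i >= e
        on_stride = (i - b) % s == 0
    return (not out_of_range) and on_stride

# Positions picked by a slice equivalent to [1:7:2] over an 8-element axis.
print([i for i in range(8) if within_index_py(1, 7, 2, i)])  # [1, 3, 5]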
def test_estimate_flop():
    N = 512
    A, B, C = matmul_auto_scheduler_test(N, N, N)
    dag = auto_scheduler.ComputeDAG([A, B, C])
    assert abs(dag.flop_ct - 2 * N**3) < 0.5

    D = topi.nn.relu(C)
    dag = auto_scheduler.ComputeDAG([A, B, D])
    assert abs(dag.flop_ct - (2 * N**3 + N * N)) < 0.5

    # should not count the comparison operations in padding
    E = topi.nn.pad(C, [1, 1])
    dag = auto_scheduler.ComputeDAG([A, B, E])
    assert abs(dag.flop_ct - 2 * N**3) < 0.5

    F = te.compute((N, N), lambda i, j: E[i, j], name="F", attrs={"FLOP": 1234})
    dag = auto_scheduler.ComputeDAG([A, B, F])
    assert abs(dag.flop_ct - (2 * N**3 + 1234)) < 0.5

    A = te.placeholder((N, N), dtype="float32", name="A")
    F = te.compute((N, N), lambda i, j: te.if_then_else(A[i, j] > 0, A[i, j], 0))
    dag = auto_scheduler.ComputeDAG([A, F])
    assert abs(dag.flop_ct - N**2) < 0.5
def test_vectorize_with_if_cond_int64():
    m = te.size_var("m", dtype="int64")
    A = te.placeholder((m,), name="A", dtype="float32")
    B = te.compute((m,), lambda i: te.if_then_else(i < 2, A[i], A[i] * 2), name="B")
    s = te.create_schedule(B.op)
    x, y = s[B].split(B.op.axis[0], factor=4)
    s[B].vectorize(y)
    f = tvm.build(s, [A, B], "llvm")
def _compute_block(i, nb_j, j):
    # Locate the blocks belonging to output block column nb_j in the BSR weight.
    row_start = weight_indptr[nb_j]
    row_end = weight_indptr[nb_j + 1]
    row_elems = row_end - row_start
    elem_idx = te.reduce_axis((0, k // bs_c), name="elem_idx")
    block_offset = row_start + elem_idx
    c = te.reduce_axis((0, bs_c), name="c")
    block_j = weight_indices[block_offset]
    block_ij_val = weight_data[block_offset][j][c]
    x_val = data[i, bs_c * block_j + c]
    prod = block_ij_val * x_val
    # Mask out reduction steps that fall beyond this row's stored blocks.
    return te.sum(
        te.if_then_else(elem_idx < row_elems, prod, tvm.tir.const(0.0, dtype=data.dtype)),
        axis=[elem_idx, c],
    )
def _get_pixel_value(n, c, h, w):
    if padding_mode == "zeros":
        return te.if_then_else(
            te.all(h >= 0, w >= 0, h < in_height, w < in_width),
            data[n, c, h, w],
            tir.const(0.0, dtype=data.dtype),
        )
    if padding_mode == "border":
        h_b = te.max(te.min(h, in_height - 1), 0)
        w_b = te.max(te.min(w, in_width - 1), 0)
        return data[n, c, h_b, w_b]
    raise AssertionError("unsupported padding_mode")
def test_explicit_partition_hint():
    A = te.placeholder((16,), name="A")
    B = te.placeholder((16,), name="B")
    C = te.compute((32,), lambda i: te.if_then_else(i < 16, A[i], B[i]), name="C")
    s = te.create_schedule(C.op)
    s.normalize()
    s[C].pragma(s[C].op.axis[0], "loop_partition_hint")
    mod = tvm.driver.build_module.schedule_to_module(s, [A, B, C], "main", None)
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.StorageFlatten(64)(mod)
        mod = tvm.tir.transform.LoopPartition()(mod)
        mod = tvm.tir.transform.Simplify()(mod)
    assert tvm.ir.structural_equal(mod["main"], partitioned_concat)
def make_relu(shape, tgt, tgt_host, func_name, dtype="float32"):
    """TODO: Your code here"""
    """Hint: use tvm.max, tvm.const(0, A.dtype)"""
    A = te.placeholder(shape, dtype=dtype, name="A")
    B = te.compute(
        A.shape, lambda *i: te.if_then_else(A(*i) > 0, A(*i), tvm.tir.const(0, A.dtype))
    )
    s = te.create_schedule(B.op)
    if tgt == "cuda":
        bx, tx = s[B].split(B.op.axis[0], factor=64)
        s[B].bind(bx, te.thread_axis("blockIdx.x"))
        s[B].bind(tx, te.thread_axis("threadIdx.x"))
    f = tvm.build(s, [A, B], tgt, tgt_host, name=func_name)
    return f
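# --- A hedged usage sketch: build the ReLU kernel for LLVM and check it against
# NumPy. The shape and target strings are assumptions for illustration.
import numpy as np
import tvm

relu = make_relu((1024,), "llvm", "llvm", "relu")
a = tvm.nd.array(np.random.randn(1024).astype("float32"))
b = tvm.nd.array(np.zeros(1024, dtype="float32"))
relu(a, b)
np.testing.assert_allclose(b.numpy(), np.maximum(a.numpy(), 0))  # use .asnumpy() on older TVM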
def padding(X, ph, pw, val=0):
    """Pad X with the given value in 2-D

    ph, pw : height and width padding
    val : padding value, default 0
    """
    assert len(X.shape) >= 2
    nh, nw = X.shape[-2], X.shape[-1]
    return te.compute(
        (*X.shape[0:-2], nh + ph * 2, nw + pw * 2),
        lambda *i: te.if_then_else(
            te.any(i[-2] < ph, i[-2] >= nh + ph, i[-1] < pw, i[-1] >= nw + pw),
            val,
            X[i[:-2] + (i[-2] - ph, i[-1] - pw)]),
        name='PaddedX')
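# --- A minimal usage sketch (assumed shapes): pad a 4-D tensor by 1 row and 2
# columns on each side and inspect the lowered loop nest.
X = te.placeholder((2, 3, 4, 4), name="X")
Y = padding(X, 1, 2)
print(Y.shape)  # [2, 3, 6, 8]
sch = te.create_schedule(Y.op)
print(tvm.lower(sch, [X, Y], simple_mode=True))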
def _dilate(*indices):
    not_zero = []
    index_tuple = []
    for i in range(n):
        if not strides[i] == 1:
            index_tuple.append(idx_div(indices[i], strides[i]))
            not_zero.append(idx_mod(indices[i], strides[i]).equal(0))
        else:
            index_tuple.append(indices[i])
    if not_zero:
        not_zero = te.all(*not_zero)
        return te.if_then_else(not_zero, padded(*index_tuple), tir.const(0.0, padded.dtype))
    return padded(*index_tuple)
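# --- Related usage sketch: the same zero-insertion pattern is exposed as
# topi.nn.dilate; a (2, 2) tensor dilated with stride 2 in both dimensions becomes
# (3, 3) with zeros in between. Shapes here are assumptions for illustration.
from tvm import te, topi

A = te.placeholder((2, 2), name="A")
D = topi.nn.dilate(A, (2, 2))
print(D.shape)  # [3, 3]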
def _resize_2d(
    indices, data, roi, image_height, image_width, target_height, target_width,
    boxes=None, box_indices=None, method=None, extrapolation_value=0.0, layout="NCHW",
    coordinate_transformation_mode="align_corners", rounding_method="", alpha=-0.5,
    exclude_outside=0, out_dtype=None,
):
    """Perform resize operation on the data with selected method and options.

    Parameters
    ----------
    indices : tuple
        The indices of input data
    data : tvm.te.Tensor
        inputs is a 4-D tensor with shape
        [batch, channel, in_height, in_width] or [batch, in_height, in_width, channel]
    roi: Tuple of Float or Expr
        The region of interest for cropping the input image. Expected to be of
        size 4, and format [start_h, start_w, end_h, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.
    image_height : integer
        Input image height
    image_width : integer
        Input image width
    target_height : integer
        The target resized image height
    target_width : integer
        The target resized image width
    boxes : tvm.te.Tensor, optional
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.
    method: string, optional
        method of interpolation ("nearest", "linear", "bicubic")
    box_indices : tvm.te.Tensor, optional
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.
    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.
    layout: string, optional
        "NCHW", "NHWC", or "NCHWc".
    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].
    rounding_method: string, optional
        indicates how to find the "nearest" pixel in nearest_neighbor method
        [round, floor, ceil]
    alpha: float, optional
        Bicubic spline coefficient
    exclude_outside: bool, optional
        Exclude values outside the image for bicubic interpolation
    out_dtype: string, optional
        Type to return. If left None will be same as input type.

    Returns
    -------
    output : out_dtype
        The computed result with type out_dtype
    """

    def _cast_output(value, data_dtype="float32", out_dtype=None):
        if out_dtype:
            dtype = out_dtype
        else:
            dtype = data_dtype
        return value.astype(dtype)

    height_use_int_div = False
    width_use_int_div = False
    if method == "nearest_neighbor" and coordinate_transformation_mode == "asymmetric":
        height_use_int_div = can_convert_multiply_to_intdiv(image_height, target_height)
        width_use_int_div = can_convert_multiply_to_intdiv(image_width, target_width)

    n, c, y, x, cc, inum, ic = get_2d_indices(indices, layout)
    box_idx = box_indices(n) if box_indices is not None else n

    if boxes is not None:
        y1, x1 = boxes(n, 0), boxes(n, 1)
        y2, x2 = boxes(n, 2), boxes(n, 3)

        in_h = (image_height - 1) * (y2 - y1)
        in_w = (image_width - 1) * (x2 - x1)
        h_scale = in_h.astype("float") / (target_height - 1)
        w_scale = in_w.astype("float") / (target_width - 1)

        in_y = y1 * (image_height - 1) + h_scale * y
        in_x = x1 * (image_width - 1) + w_scale * x
    else:
        in_x = get_inx(
            x, image_width, target_width, coordinate_transformation_mode,
            roi[1], roi[3], width_use_int_div,
        )
        in_y = get_inx(
            y, image_height, target_height, coordinate_transformation_mode,
            roi[0], roi[2], height_use_int_div,
        )

    if method == "nearest_neighbor":
        if rounding_method == "":
            if coordinate_transformation_mode == "align_corners":
                rounding_method = "round"
            else:
                rounding_method = "floor"

        closest_x_index = get_closest_index(in_x, rounding_method, boxes, width_use_int_div)
        closest_y_index = get_closest_index(in_y, rounding_method, boxes, height_use_int_div)

        value = get_2d_pixel(
            data, layout, image_height, image_width, box_idx, c,
            closest_y_index, closest_x_index, cc, inum, ic,
        )
    elif method == "linear":
        y_int = te.floor(in_y).astype("int32")
        x_int = te.floor(in_x).astype("int32")

        y_lerp = in_y - y_int
        x_lerp = in_x - x_int

        p = [[0 for i in range(2)] for j in range(2)]
        for j in range(2):
            for i in range(2):
                p[j][i] = get_2d_pixel(
                    data, layout, image_height, image_width, box_idx, c,
                    y_int + j, x_int + i, cc, inum, ic,
                )

        top = _lerp(*p[0], x_lerp)
        bottom = _lerp(*p[1], x_lerp)
        value = _lerp(top, bottom, y_lerp)
    elif method == "cubic":
        xint = te.floor(in_x).astype("int32")
        xfract = in_x - te.floor(in_x)

        yint = te.floor(in_y).astype("int32")
        yfract = in_y - te.floor(in_y)

        # Get the surrounding values
        p = [[0 for i in range(4)] for j in range(4)]
        for j in range(4):
            for i in range(4):
                p[j][i] = get_2d_pixel(
                    data, layout, image_height, image_width, box_idx, c,
                    yint + j - 1, xint + i - 1, cc, inum, ic,
                )

        wx = _cubic_spline_weights(xfract, alpha)
        wy = _cubic_spline_weights(yfract, alpha)
        if exclude_outside:
            for i in range(4):
                wx[i] = te.if_then_else(
                    te.any(xint - 1 + i < 0, xint + i > image_width), 0.0, wx[i]
                )
                wy[i] = te.if_then_else(
                    te.any(yint - 1 + i < 0, yint + i > image_height), 0.0, wy[i]
                )
            sum_wx = sum(wx)
            sum_wy = sum(wy)
            wx = [w / sum_wx for w in wx]
            wy = [w / sum_wy for w in wy]
        col0 = _cubic_kernel(p[0], wx)
        col1 = _cubic_kernel(p[1], wx)
        col2 = _cubic_kernel(p[2], wx)
        col3 = _cubic_kernel(p[3], wx)
        value = _cubic_kernel([col0, col1, col2, col3], wy)
    else:
        raise ValueError("Unknown resize method:", method)

    if coordinate_transformation_mode == "tf_crop_and_resize":
        out = tvm.tir.if_then_else(
            in_y < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_y > image_height - 1, extrapolation_value, value),
        )
        # use extrapolation_value if in_x is out of boundary
        value = tvm.tir.if_then_else(
            in_x < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value, out),
        )
    return _cast_output(value, data.dtype, out_dtype=out_dtype)
def _resize_1d(
    indices, data, roi, image_width, target_width, boxes=None, box_indices=None, method=None,
    extrapolation_value=0.0, layout="NCW", coordinate_transformation_mode="align_corners",
    rounding_method="", alpha=-0.5, exclude_outside=0, out_dtype=None,
):
    """Perform resize operation on the data with selected method and options.

    Parameters
    ----------
    indices : tuple
        The indices of input data
    data : tvm.te.Tensor
        inputs is a 3-D tensor with shape
        [batch, channel, in_width] or [batch, in_width, channel]
    roi: Tuple of Float or Expr
        The region of interest for cropping the input image. Expected to be of
        size 2, and format [start_w, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.
    image_width : integer
        Input image width
    target_width : integer
        The target resized image width
    boxes : tvm.te.Tensor, optional
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.
    box_indices : tvm.te.Tensor, optional
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.
    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.
    layout: string, optional
        "NCW", "NWC", or "NCWc".
    method: string, optional
        method of interpolation ("nearest", "linear", "bicubic")
    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].
    rounding_method: string, optional
        indicates how to find the "nearest" pixel in nearest_neighbor method
        [round, floor, ceil]
    alpha: float, optional
        Bicubic spline coefficient
    exclude_outside: bool, optional
        Exclude values outside the image for bicubic interpolation
    out_dtype: string, optional
        Type to return. If left None will be same as input type.

    Returns
    -------
    output : out_dtype
        The computed result with type out_dtype
    """

    def _cast_output(value, data_dtype="float32", out_dtype=None):
        if out_dtype:
            dtype = out_dtype
        else:
            dtype = data_dtype
        return value.astype(dtype)

    n, c, x, cc, inum, ic = get_1d_indices(indices, layout)
    box_idx = box_indices(n) if box_indices is not None else n

    if boxes is not None:
        # TODO(mbrookhart): Find an example of this
        raise NotImplementedError("resize1d with image boxes not yet implemented")

    in_x = get_inx(x, image_width, target_width, coordinate_transformation_mode, roi[0], roi[1])

    if method == "nearest_neighbor":
        if rounding_method == "":
            if coordinate_transformation_mode == "align_corners":
                rounding_method = "round"
            else:
                rounding_method = "floor"

        closest_x_index = get_closest_index(in_x, rounding_method, boxes)
        value = get_1d_pixel(data, layout, image_width, box_idx, c, closest_x_index, cc, inum, ic)
    elif method == "linear":
        x_int = te.floor(in_x).astype("int32")
        x_lerp = in_x - x_int

        p = [0 for i in range(2)]
        for i in range(2):
            p[i] = get_1d_pixel(data, layout, image_width, box_idx, c, x_int + i, cc, inum, ic)

        value = _lerp(*p, x_lerp)
    elif method == "cubic":
        xint = te.floor(in_x).astype("int32")
        xfract = in_x - te.floor(in_x)

        # Get the surrounding values
        p = [0 for i in range(4)]
        for i in range(4):
            p[i] = get_1d_pixel(data, layout, image_width, box_idx, c, xint + i - 1, cc, inum, ic)

        wx = _cubic_spline_weights(xfract, alpha)
        if exclude_outside:
            for i in range(4):
                wx[i] = te.if_then_else(
                    te.any(xint - 1 + i < 0, xint + i > image_width), 0.0, wx[i]
                )
            sum_wx = sum(wx)
            wx = [w / sum_wx for w in wx]
        value = _cubic_kernel(p, wx)
    else:
        raise ValueError("Unknown resize method:", method)

    if coordinate_transformation_mode == "tf_crop_and_resize":
        # use extrapolation_value if in_x is out of boundary
        value = tvm.tir.if_then_else(
            in_x < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value, value),
        )
    return _cast_output(value, data.dtype, out_dtype=out_dtype)
def _resize_3d(
    indices, data, roi, image_depth, image_height, image_width, target_depth, target_height,
    target_width, boxes=None, box_indices=None, method=None, extrapolation_value=0.0,
    layout="NCDHW", coordinate_transformation_mode="align_corners", rounding_method="",
    alpha=-0.5, exclude_outside=0, out_dtype=None,
):
    """Perform resize operation on the data with selected method and options.

    Parameters
    ----------
    indices : tuple
        The indices of input data
    data : tvm.te.Tensor
        inputs is a 5-D tensor with shape
        [batch, channel, in_depth, in_height, in_width]
        or [batch, in_depth, in_height, in_width, channel]
    roi: Tuple of Float or Expr
        The region of interest for cropping the input image. Expected to be of
        size 6, and format [start_d, start_h, start_w, end_d, end_h, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.
    image_depth : integer
        Input image depth
    image_height : integer
        Input image height
    image_width : integer
        Input image width
    target_depth : integer
        The target resized image depth
    target_height : integer
        The target resized image height
    target_width : integer
        The target resized image width
    boxes : tvm.te.Tensor, optional
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.
    box_indices : tvm.te.Tensor, optional
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.
    method: string, optional
        method of interpolation ("nearest", "linear", "bicubic")
    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.
    layout: string, optional
        "NCDHW", "NDHWC", or "NCDHWc".
    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].
    rounding_method: string, optional
        indicates how to find the "nearest" pixel in nearest_neighbor method
        [round, floor, ceil]
    alpha: float, optional
        Bicubic spline coefficient
    exclude_outside: bool, optional
        Exclude values outside the image for bicubic interpolation
    out_dtype: string, optional
        Type to return. If left None will be same as input type.

    Returns
    -------
    output : out_dtype
        The computed result with type out_dtype
    """

    def _cast_output(value, data_dtype="float32", out_dtype=None):
        if out_dtype:
            dtype = out_dtype
        else:
            dtype = data_dtype
        return value.astype(dtype)

    n, c, z, y, x, cc = get_3d_indices(indices, layout)
    box_idx = box_indices(n) if box_indices is not None else n

    if boxes is not None:
        # TODO(mbrookhart): Find an example of this
        raise NotImplementedError("resize3d with image boxes not yet implemented")

    in_z = get_inx(z, image_depth, target_depth, coordinate_transformation_mode, roi[2], roi[5])
    in_y = get_inx(y, image_height, target_height, coordinate_transformation_mode, roi[1], roi[4])
    in_x = get_inx(x, image_width, target_width, coordinate_transformation_mode, roi[0], roi[3])

    if method == "nearest_neighbor":
        if rounding_method == "":
            if coordinate_transformation_mode == "align_corners":
                rounding_method = "round"
            else:
                rounding_method = "floor"

        closest_z_index = get_closest_index(in_z, rounding_method, boxes)
        closest_y_index = get_closest_index(in_y, rounding_method, boxes)
        closest_x_index = get_closest_index(in_x, rounding_method, boxes)

        value = get_3d_pixel(
            data, layout, image_depth, image_height, image_width, box_idx, c,
            closest_z_index, closest_y_index, closest_x_index, cc,
        )
    elif method == "linear":
        z_int = te.floor(in_z).astype("int32")
        y_int = te.floor(in_y).astype("int32")
        x_int = te.floor(in_x).astype("int32")

        z_lerp = in_z - z_int
        y_lerp = in_y - y_int
        x_lerp = in_x - x_int

        p = [[[0 for i in range(2)] for j in range(2)] for k in range(2)]
        for k in range(2):
            for j in range(2):
                for i in range(2):
                    p[k][j][i] = get_3d_pixel(
                        data, layout, image_depth, image_height, image_width, box_idx, c,
                        z_int + k, y_int + j, x_int + i, cc,
                    )
        l = [[0 for i in range(2)] for j in range(2)]
        for j in range(2):
            for i in range(2):
                l[j][i] = _lerp(*p[j][i], x_lerp)

        top = _lerp(*l[0], y_lerp)
        bottom = _lerp(*l[1], y_lerp)
        value = _lerp(top, bottom, z_lerp)
    elif method == "cubic":
        zint = te.floor(in_z).astype("int32")
        zfract = in_z - te.floor(in_z)

        yint = te.floor(in_y).astype("int32")
        yfract = in_y - te.floor(in_y)

        xint = te.floor(in_x).astype("int32")
        xfract = in_x - te.floor(in_x)

        # Get the surrounding values
        p = [[[0 for i in range(4)] for j in range(4)] for k in range(4)]
        for k in range(4):
            for j in range(4):
                for i in range(4):
                    p[k][j][i] = get_3d_pixel(
                        data, layout, image_depth, image_height, image_width, box_idx, c,
                        zint + k - 1, yint + j - 1, xint + i - 1, cc,
                    )

        wz = _cubic_spline_weights(zfract, alpha)
        wy = _cubic_spline_weights(yfract, alpha)
        wx = _cubic_spline_weights(xfract, alpha)
        if exclude_outside:
            for i in range(4):
                wz[i] = te.if_then_else(
                    te.any(zint - 1 + i < 0, zint + i > image_depth), 0.0, wz[i]
                )
                wy[i] = te.if_then_else(
                    te.any(yint - 1 + i < 0, yint + i > image_height), 0.0, wy[i]
                )
                wx[i] = te.if_then_else(
                    te.any(xint - 1 + i < 0, xint + i > image_width), 0.0, wx[i]
                )
            sum_wz = sum(wz)
            sum_wy = sum(wy)
            sum_wx = sum(wx)
            wz = [w / sum_wz for w in wz]
            wy = [w / sum_wy for w in wy]
            wx = [w / sum_wx for w in wx]

        l = [[0 for i in range(4)] for j in range(4)]
        for j in range(4):
            for i in range(4):
                l[j][i] = _cubic_kernel(p[j][i], wx)
        col0 = _cubic_kernel(l[0], wy)
        col1 = _cubic_kernel(l[1], wy)
        col2 = _cubic_kernel(l[2], wy)
        col3 = _cubic_kernel(l[3], wy)
        value = _cubic_kernel([col0, col1, col2, col3], wz)
    else:
        raise ValueError("Unknown resize method:", method)

    if coordinate_transformation_mode == "tf_crop_and_resize":
        out = tvm.tir.if_then_else(
            in_z < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_z > image_depth - 1, extrapolation_value, value),
        )
        out = tvm.tir.if_then_else(
            in_y < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_y > image_height - 1, extrapolation_value, out),
        )
        # use extrapolation_value if in_x is out of boundary
        value = tvm.tir.if_then_else(
            in_x < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value, out),
        )
    return _cast_output(value, data.dtype, out_dtype=out_dtype)
def _get_pixel_value(n, c, h, w):
    return te.if_then_else(
        te.all(h >= 0, w >= 0, h < in_height, w < in_width),
        data[n, c, h, w],
        tir.const(0.0, dtype=data.dtype),
    )
import tvm
from tvm import te

n = 1024
k = 3
pad = 2
A = te.placeholder((n, n), name='A')
W = te.placeholder((k, k), name='W')
m = (n - k + 2 * pad) + 1

# Zero-pad A, then convolve the padded image with the k x k kernel W.
Apad = te.compute(
    (n + 2 * pad, n + 2 * pad),
    lambda yy, xx: te.if_then_else(
        te.all(yy >= pad, yy < pad + n, xx >= pad, xx < pad + n),
        A[yy - pad, xx - pad],
        tvm.tir.const(0., "float32")),
    name='Apad')
ry = te.reduce_axis((0, k), name='ry')
rx = te.reduce_axis((0, k), name='rx')
B = te.compute(
    (m, m),
    lambda yy, xx: te.sum(Apad[yy + ry, xx + rx] * W[ry, rx], axis=[ry, rx]),
    name='B')

s = te.create_schedule(B.op)
print(tvm.lower(s, [A, W, B], simple_mode=True))
print("---------cutting line---------")
# Inline the padding stage so the boundary if_then_else is folded into the conv loop.
s[Apad].compute_inline()
print(tvm.lower(s, [A, W, B], simple_mode=True))
exit(0)
def resize_bicubic(
    indices, data, image_height, image_width, target_height, target_width, boxes=None,
    box_indices=None, extrapolation_value=None, layout="NCHW",
    coordinate_transformation_mode="align_corners", out_dtype=None, alpha=-0.5,
    exclude_outside=0,
):
    """Perform resize operation with bicubic method on the data.
    For more details about bicubic interpolation please refer to
    https://en.wikipedia.org/wiki/Bicubic_interpolation.
    This algorithm is doing a bicubic spline interpolation.

    Parameters
    ----------
    indices : tuple
        The indices of input data
    data : tvm.te.Tensor
        inputs is a 4-D tensor with shape
        [batch, channel, in_height, in_width] or [batch, in_height, in_width, channel]
    image_height : integer
        Input image height
    image_width : integer
        Input image width
    target_height : integer
        The target resized image height
    target_width : integer
        The target resized image width
    boxes : tvm.te.Tensor, optional
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.
    box_indices : tvm.te.Tensor, optional
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.
    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.
    layout: string, optional
        "NCHW", "NHWC", or "NCHWc".
    coordinate_transformation_mode: string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        Refer to the ONNX Resize operator specification for details.
        Available options are "half_pixel", "align_corners" and "asymmetric".
    out_dtype: string, optional
        Type to return. If left None will be same as input type.
    alpha: float, optional
        Bicubic spline coefficient

    Returns
    -------
    output : out_dtype
        The computed result with type out_dtype
    """

    def _cast_output(value, data_dtype="float32", out_dtype=None):
        if out_dtype:
            dtype = out_dtype
        else:
            dtype = data_dtype
        return value.astype(dtype)

    n, c, y, x, cc, inum, ic = get_2d_indices(indices, layout)
    box_idx = box_indices(n) if box_indices is not None else n

    if boxes is not None:
        y1, x1 = boxes(n, 0), boxes(n, 1)
        y2, x2 = boxes(n, 2), boxes(n, 3)

        in_h = (image_height - 1) * (y2 - y1)
        in_w = (image_width - 1) * (x2 - x1)
        h_scale = in_h.astype("float") / (target_height - 1)
        w_scale = in_w.astype("float") / (target_width - 1)

        in_y = y1 * (image_height - 1) + h_scale * y
        in_x = x1 * (image_width - 1) + w_scale * x
    else:
        in_y, in_x = get_iny_inx(
            y, x, image_height, image_width, target_height, target_width,
            coordinate_transformation_mode,
        )

    xint = te.floor(in_x).astype("int32")
    xfract = in_x - te.floor(in_x)

    yint = te.floor(in_y).astype("int32")
    yfract = in_y - te.floor(in_y)

    # Get the surrounding values
    p = [[0 for i in range(4)] for j in range(4)]
    for j in range(4):
        for i in range(4):
            p[j][i] = get_2d_pixel(
                data, layout, boxes, image_height, image_width, box_idx, c,
                yint + j - 1, xint + i - 1, cc, inum, ic,
            )

    # Interpolate bicubically
    def _cubic_spline_weights(t):
        t2 = t * t
        t3 = t * t * t
        w1 = alpha * (t3 - 2 * t2 + t)
        w2 = (alpha + 2) * t3 - (3 + alpha) * t2 + 1
        w3 = -(alpha + 2) * t3 + (3 + 2 * alpha) * t2 - alpha * t
        w4 = -alpha * t3 + alpha * t2
        return [w1, w2, w3, w4]

    def _cubic_kernel(inputs, w):
        return sum([a_i * w_i for a_i, w_i in zip(inputs, w)])

    wx = _cubic_spline_weights(xfract)
    wy = _cubic_spline_weights(yfract)
    if exclude_outside:
        for i in range(4):
            wx[i] = te.if_then_else(te.any(xint - 1 + i < 0, xint + i > image_width), 0.0, wx[i])
            wy[i] = te.if_then_else(te.any(yint - 1 + i < 0, yint + i > image_height), 0.0, wy[i])
        sum_wx = sum(wx)
        sum_wy = sum(wy)
        wx = [w / sum_wx for w in wx]
        wy = [w / sum_wy for w in wy]

    col0 = _cubic_kernel(p[0], wx)
    col1 = _cubic_kernel(p[1], wx)
    col2 = _cubic_kernel(p[2], wx)
    col3 = _cubic_kernel(p[3], wx)
    value = _cubic_kernel([col0, col1, col2, col3], wy)

    # use extrapolation_value if in_y/in_x is out of boundary
    if extrapolation_value is not None:
        out = tvm.tir.if_then_else(
            in_y < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_y > image_height - 1, extrapolation_value, value),
        )
        value = tvm.tir.if_then_else(
            in_x < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value, out),
        )
    return _cast_output(value, data.dtype, out_dtype=out_dtype)
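# --- Illustrative check (not part of the operator): the cubic spline weights used
# above sum to 1 for any t and any alpha, which is why the exclude_outside branch
# can renormalize simply by dividing the surviving weights by their sum.
def cubic_weights_ref(t, alpha=-0.5):
    w1 = alpha * (t**3 - 2 * t**2 + t)
    w2 = (alpha + 2) * t**3 - (3 + alpha) * t**2 + 1
    w3 = -(alpha + 2) * t**3 + (3 + 2 * alpha) * t**2 - alpha * t
    w4 = -alpha * t**3 + alpha * t**2
    return [w1, w2, w3, w4]

for t in (0.0, 0.25, 0.5, 0.9):
    assert abs(sum(cubic_weights_ref(t)) - 1.0) < 1e-9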