Example #1
def my_bilinear_sample_nchw(bottom_data, n, c, h, w, h_size, w_size):
    h_low = te.floor(h).astype("int32")
    w_low = te.floor(w).astype("int32")
    h_high = h_low + 1
    w_high = w_low + 1

    lh = h - h_low.astype(bottom_data.dtype)
    lw = w - w_low.astype(bottom_data.dtype)
    hh = 1 - lh
    hw = 1 - lw

    zero = tvm.tir.const(0.0, bottom_data.dtype)

    v1 = te.if_then_else(tvm.tir.all(h_low >= 0, w_low >= 0),
                         bottom_data[n, c, h_low, w_low], zero)

    v2 = te.if_then_else(tvm.tir.all(h_low >= 0, w_high <= w_size - 1), \
                            bottom_data[n, c, h_low, w_high], zero)

    v3 = te.if_then_else(tvm.tir.all(h_high <= h_size - 1, w_low >= 0), \
                            bottom_data[n, c, h_high, w_low], zero)

    v4 = te.if_then_else(
        tvm.tir.all(h_high <= h_size - 1, w_high <= w_size - 1),
        bottom_data[n, c, h_high, w_high], zero)

    w1 = hh * hw
    w2 = hh * lw
    w3 = lh * hw
    w4 = lh * lw

    val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
    return val
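
A minimal usage sketch (not part of the original snippet), assuming the helper above is in scope: it wires my_bilinear_sample_nchw into te.compute for a 2x bilinear upsample and prints the lowered TIR. The placeholder name, shape, and scale factor are illustrative assumptions.

import tvm
from tvm import te

N, C, H, W = 1, 3, 8, 8
data = te.placeholder((N, C, H, W), name="data", dtype="float32")
upsampled = te.compute(
    (N, C, 2 * H, 2 * W),
    # map each output pixel back to a (possibly fractional) input coordinate
    lambda n, c, i, j: my_bilinear_sample_nchw(
        data, n, c, i.astype("float32") * 0.5, j.astype("float32") * 0.5, H, W
    ),
    name="upsampled",
)
s = te.create_schedule(upsampled.op)
print(tvm.lower(s, [data, upsampled], simple_mode=True))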
Example #2
 def _bilinear_sample(n, c, h, w):
     x = grid[n, 0, h, w]
     y = grid[n, 1, h, w]
     y = (y + 1) * (in_height - 1) / 2
     x = (x + 1) * (in_width - 1) / 2
     x0 = te.floor(x).astype('int32')
     y0 = te.floor(y).astype('int32')
     x1 = x0 + tir.const(1, 'int32')
     y1 = y0 + tir.const(1, 'int32')
     return _get_pixel_value(n, c, y0, x0) * (1.0 - (y - y0)) * (1.0 - (x - x0)) \
         + _get_pixel_value(n, c, y0, x1) * (1.0 - (y - y0)) * (x - x0) \
         + _get_pixel_value(n, c, y1, x0) * (y - y0) * (1.0 - (x - x0)) \
         + _get_pixel_value(n, c, y1, x1) * (y - y0) * (x - x0)
Example #3
    def _bilinear_sample(n, c, h, w):
        y, x = _compute_source_index(n, h, w)
        y0 = te.floor(y).astype("int32")
        x0 = te.floor(x).astype("int32")
        y1 = y0 + tir.const(1, "int32")
        x1 = x0 + tir.const(1, "int32")

        return (
            _get_pixel_value(n, c, y0, x0) * (1.0 - (y - y0)) * (1.0 - (x - x0))
            + _get_pixel_value(n, c, y0, x1) * (1.0 - (y - y0)) * (x - x0)
            + _get_pixel_value(n, c, y1, x0) * (y - y0) * (1.0 - (x - x0))
            + _get_pixel_value(n, c, y1, x1) * (y - y0) * (x - x0)
        )
Example #4
def get_closest_index(in_x, rounding_method, boxes):
    """get the closest index to a value based on a certain rounding method"""
    if rounding_method == "round" or boxes is not None:
        closest_x_index = te.round(in_x).astype("int32")
    elif rounding_method == "round_prefer_floor":
        closest_x_index = te.ceil(in_x - 0.5).astype("int32")
    elif rounding_method == "round_prefer_ceil":
        closest_x_index = te.floor(in_x + 0.5).astype("int32")
    elif rounding_method == "floor":
        # Add epsilon to floor to prevent gpu rounding errors.
        epsilon = 1e-5
        closest_x_index = te.floor(in_x + epsilon).astype("int32")
    elif rounding_method == "ceil":
        # Subtract epsilon from ceil to prevent gpu rounding errors.
        epsilon = 1e-5
        closest_x_index = te.ceil(in_x - epsilon).astype("int32")
    else:
        raise ValueError("Unknown rounding method: {}".format(rounding_method))
    return closest_x_index
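
A hedged usage sketch (names, shapes, and the fixed scale are illustrative assumptions): the three-argument helper above drives the index computation of a 1-D nearest-neighbor resize inside te.compute. Note that other snippets on this page use a four-argument variant of get_closest_index.

import tvm
from tvm import te

in_width, out_width = 10, 25
scale = float(in_width) / out_width  # asymmetric-style coordinate mapping
data = te.placeholder((in_width,), name="data", dtype="float32")
resized = te.compute(
    (out_width,),
    lambda x: data[get_closest_index(x.astype("float32") * scale, "floor", None)],
    name="resized",
)
s = te.create_schedule(resized.op)
print(tvm.lower(s, [data, resized], simple_mode=True))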
Example #5
    def _trilinear_sample(n, c, d, h, w):
        z, y, x = _compute_source_index(n, d, h, w)
        z0 = te.floor(z).astype("int32")
        y0 = te.floor(y).astype("int32")
        x0 = te.floor(x).astype("int32")
        z1 = z0 + tir.const(1, "int32")
        y1 = y0 + tir.const(1, "int32")
        x1 = x0 + tir.const(1, "int32")

        return (
            _get_pixel_value(n, c, z0, y0, x0) * (1 - (x - x0)) * (1 - (y - y0)) * (1 - (z - z0))
            + _get_pixel_value(n, c, z0, y0, x1) * (x - x0) * (1 - (y - y0)) * (1 - (z - z0))
            + _get_pixel_value(n, c, z1, y1, x0) * (1 - (x - x0)) * (y - y0) * (z - z0)
            + _get_pixel_value(n, c, z1, y1, x1) * (x - x0) * (y - y0) * (z - z0)
            + _get_pixel_value(n, c, z0, y1, x0) * (1 - (x - x0)) * (y - y0) * (1 - (z - z0))
            + _get_pixel_value(n, c, z1, y0, x1) * (x - x0) * (1 - (y - y0)) * (z - z0)
            + _get_pixel_value(n, c, z1, y0, x0) * (1 - (x - x0)) * (1 - (y - y0)) * (z - z0)
            + _get_pixel_value(n, c, z0, y1, x1) * (x - x0) * (y - y0) * (1 - (z - z0))
        )
Example #6
    def _pool(i, c, ph, pw):
        roi = rois[i]
        batch_index = roi[0].astype("int32")
        roi_start_w, roi_start_h, roi_end_w, roi_end_h = roi[1], roi[2], roi[3], roi[4]

        roi_start_h = te.round(roi_start_h * spatial_scale).astype("int32")
        roi_start_w = te.round(roi_start_w * spatial_scale).astype("int32")
        roi_end_h = te.round(roi_end_h * spatial_scale).astype("int32")
        roi_end_w = te.round(roi_end_w * spatial_scale).astype("int32")

        # force malformed ROIs to be 1x1
        roi_h = tvm.te.max(roi_end_h - roi_start_h + 1,
                           tvm.tir.const(1, "int32"))
        roi_w = tvm.te.max(roi_end_w - roi_start_w + 1,
                           tvm.tir.const(1, "int32"))

        bin_h = roi_h.astype(dtype) / pooled_size_h
        bin_w = roi_w.astype(dtype) / pooled_size_w

        # use epsilon to prevent floating point precision loss in floor/ceil
        epsilon = tvm.tir.const(0.00001, dtype)
        hstart = te.floor(ph * bin_h + epsilon).astype("int32")
        wstart = te.floor(pw * bin_w + epsilon).astype("int32")
        hend = te.ceil((ph + 1) * bin_h - epsilon).astype("int32")
        wend = te.ceil((pw + 1) * bin_w - epsilon).astype("int32")
        hstart = tvm.te.min(tvm.te.max(hstart + roi_start_h, 0), height)
        wstart = tvm.te.min(tvm.te.max(wstart + roi_start_w, 0), width)
        hend = tvm.te.min(tvm.te.max(hend + roi_start_h, 0), height)
        wend = tvm.te.min(tvm.te.max(wend + roi_start_w, 0), width)

        non_empty = tvm.tir.all(hstart < hend, wstart < wend)
        min_value = lambda dtype: tvm.tir.if_then_else(
            non_empty, tvm.te.min_value(dtype), tvm.tir.const(0.0, dtype))
        # pylint: disable=unnecessary-lambda
        _max = te.comm_reducer(lambda x, y: tvm.te.max(x, y),
                               min_value,
                               name="max")
        rh = te.reduce_axis((0, hend - hstart), "rh")
        rw = te.reduce_axis((0, wend - wstart), "rw")
        return _max(data[batch_index, c, hstart + rh, wstart + rw],
                    axis=[rh, rw])
Example #7
    def _nearest_neighbor(*indices):
        n, c, z, y, x, cc = _get_indices(*indices)

        in_z = z_ratio * z
        in_y = y_ratio * y
        in_x = x_ratio * x

        if coordinate_transformation_mode == "align_corners":
            zint = te.round(in_z).astype('int32')
            yint = te.round(in_y).astype('int32')
            xint = te.round(in_x).astype('int32')
        elif coordinate_transformation_mode in ["asymmetric", "half_pixel"]:
            # Add epsilon to floor to prevent gpu rounding errors.
            epsilon = 1e-5
            zint = te.floor(in_z + epsilon).astype('int32')
            yint = te.floor(in_y + epsilon).astype('int32')
            xint = te.floor(in_x + epsilon).astype('int32')
        else:
            raise ValueError("Unsupported coordinate_transformation_mode: {}".format(
                coordinate_transformation_mode))

        return _cast_output(_get_pixel(n, c, zint, yint, xint, cc))
Example #8
def floor(x):
    """Take floor of input x.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input argument.

    Returns
    -------
    y : tvm.te.Tensor
        The result.
    """
    return te.compute(x.shape, lambda *i: te.floor(x(*i)))
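
A small end-to-end check (a sketch assuming an LLVM target is available locally): build the element-wise wrapper above and compare it against numpy's floor. The shape and data range are arbitrary.

import numpy as np
import tvm
from tvm import te

x = te.placeholder((4, 5), name="x", dtype="float32")
y = floor(x)  # the wrapper defined above
s = te.create_schedule(y.op)
f = tvm.build(s, [x, y], target="llvm")

a = tvm.nd.array(np.random.uniform(-3, 3, (4, 5)).astype("float32"))
b = tvm.nd.array(np.empty((4, 5), dtype="float32"))
f(a, b)
np.testing.assert_allclose(b.numpy(), np.floor(a.numpy()))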
Example #9
    def _trilinear(*indices):
        n, c, z, y, x, cc = _get_indices(*indices)

        if coordinate_transformation_mode == "half_pixel":
            in_z = z_ratio * (z + 0.5) - 0.5
            in_y = y_ratio * (y + 0.5) - 0.5
            in_x = x_ratio * (x + 0.5) - 0.5
        else:
            in_z = z_ratio * z
            in_y = y_ratio * y
            in_x = x_ratio * x

        zint = te.floor(in_z).astype('int32')
        zfract = in_z - te.floor(in_z)

        xint = te.floor(in_x).astype('int32')
        xfract = in_x - te.floor(in_x)

        yint = te.floor(in_y).astype('int32')
        yfract = in_y - te.floor(in_y)

        p000 = _get_pixel(n, c, zint, yint, xint, cc)
        p001 = _get_pixel(n, c, zint, yint, xint + 1, cc)
        p010 = _get_pixel(n, c, zint, yint + 1, xint, cc)
        p011 = _get_pixel(n, c, zint, yint + 1, xint + 1, cc)
        p100 = _get_pixel(n, c, zint + 1, yint, xint, cc)
        p101 = _get_pixel(n, c, zint + 1, yint, xint + 1, cc)
        p110 = _get_pixel(n, c, zint + 1, yint + 1, xint, cc)
        p111 = _get_pixel(n, c, zint + 1, yint + 1, xint + 1, cc)

        dep00 = _lerp(p000, p100, zfract)
        dep01 = _lerp(p001, p101, zfract)
        dep10 = _lerp(p010, p110, zfract)
        dep11 = _lerp(p011, p111, zfract)
        col0 = _lerp(dep00, dep01, xfract)
        col1 = _lerp(dep10, dep11, xfract)
        value = _lerp(col0, col1, yfract)
        return _cast_output(value)
Example #10
def _resize_2d(
    indices,
    data,
    roi,
    image_height,
    image_width,
    target_height,
    target_width,
    boxes=None,
    box_indices=None,
    method=None,
    extrapolation_value=0.0,
    layout="NCHW",
    coordinate_transformation_mode="align_corners",
    rounding_method="",
    alpha=-0.5,
    exclude_outside=0,
    out_dtype=None,
):
    """Perform resize operation on the data with selected method and options.

    Parameters
    ----------
    indices : tuple
        The indices of input data

    data : tvm.te.Tensor
        inputs is a 4-D tensor with shape
        [batch, channel, in_height, in_width]
        or  [batch, in_height, in_width, channel]

    roi: Tuple of Float or Expr
        The region of interest for cropping the input image. Expected to be of
        size 4, and format [start_h, start_w, end_h, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.

    image_height : integer
        Input image height

    image_width : integer
        Input image width

    target_height : integer
        The target resized image height

    target_width : integer
        The target resized image width

    boxes : tvm.te.Tensor, optional
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.

    method: string, optional
        method of interpolation ("nearest", "linear", "bicubic")

    box_indices : tvm.te.Tensor, optional
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.

    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.

    layout: string, optional
        "NCHW", "NHWC", or "NCHWc".

    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].

    rounding_method: string, optional
        indicates how to find the "nearest" pixel in nearest_neighbor method
        [round, floor, ceil]

    alpha: float, optional
        Bicubic spline coefficient

    exclude_outside: bool, optional
        Exclude values outside the image for bicubic interpolation


    out_dtype: string, optional
        Type to return. If left None will be same as input type.

    Returns
    -------
    output : out_dtype
        The computed result with type out_dtype
    """
    def _cast_output(value, data_dtype="float32", out_dtype=None):
        if out_dtype:
            dtype = out_dtype
        else:
            dtype = data_dtype
        return value.astype(dtype)

    height_use_int_div = False
    width_use_int_div = False
    if method == "nearest_neighbor" and coordinate_transformation_mode == "asymmetric":
        height_use_int_div = can_convert_multiply_to_intdiv(
            image_height, target_height)
        width_use_int_div = can_convert_multiply_to_intdiv(
            image_width, target_width)

    n, c, y, x, cc, inum, ic = get_2d_indices(indices, layout)
    box_idx = box_indices(n) if box_indices is not None else n
    if boxes is not None:
        y1, x1 = boxes(n, 0), boxes(n, 1)
        y2, x2 = boxes(n, 2), boxes(n, 3)

        in_h = (image_height - 1) * (y2 - y1)
        in_w = (image_width - 1) * (x2 - x1)
        h_scale = in_h.astype("float") / (target_height - 1)
        w_scale = in_w.astype("float") / (target_width - 1)

        in_y = y1 * (image_height - 1) + h_scale * y
        in_x = x1 * (image_width - 1) + w_scale * x
    else:
        in_x = get_inx(
            x,
            image_width,
            target_width,
            coordinate_transformation_mode,
            roi[1],
            roi[3],
            width_use_int_div,
        )
        in_y = get_inx(
            y,
            image_height,
            target_height,
            coordinate_transformation_mode,
            roi[0],
            roi[2],
            height_use_int_div,
        )

    if method == "nearest_neighbor":
        if rounding_method == "":
            if coordinate_transformation_mode == "align_corners":
                rounding_method = "round"
            else:
                rounding_method = "floor"

        closest_x_index = get_closest_index(in_x, rounding_method, boxes,
                                            width_use_int_div)
        closest_y_index = get_closest_index(in_y, rounding_method, boxes,
                                            height_use_int_div)

        value = get_2d_pixel(
            data,
            layout,
            image_height,
            image_width,
            box_idx,
            c,
            closest_y_index,
            closest_x_index,
            cc,
            inum,
            ic,
        )
    elif method == "linear":
        y_int = te.floor(in_y).astype("int32")
        x_int = te.floor(in_x).astype("int32")

        y_lerp = in_y - y_int
        x_lerp = in_x - x_int

        p = [[0 for i in range(2)] for j in range(2)]
        for j in range(2):
            for i in range(2):
                p[j][i] = get_2d_pixel(
                    data,
                    layout,
                    image_height,
                    image_width,
                    box_idx,
                    c,
                    y_int + j,
                    x_int + i,
                    cc,
                    inum,
                    ic,
                )

        top = _lerp(*p[0], x_lerp)
        bottom = _lerp(*p[1], x_lerp)
        value = _lerp(top, bottom, y_lerp)

    elif method == "cubic":
        xint = te.floor(in_x).astype("int32")
        xfract = in_x - te.floor(in_x)

        yint = te.floor(in_y).astype("int32")
        yfract = in_y - te.floor(in_y)

        # Get the surrounding values
        p = [[0 for i in range(4)] for j in range(4)]
        for j in range(4):
            for i in range(4):
                p[j][i] = get_2d_pixel(
                    data,
                    layout,
                    image_height,
                    image_width,
                    box_idx,
                    c,
                    yint + j - 1,
                    xint + i - 1,
                    cc,
                    inum,
                    ic,
                )

        wx = _cubic_spline_weights(xfract, alpha)
        wy = _cubic_spline_weights(yfract, alpha)
        if exclude_outside:
            for i in range(4):
                wx[i] = te.if_then_else(
                    te.any(xint - 1 + i < 0, xint + i > image_width), 0.0,
                    wx[i])
                wy[i] = te.if_then_else(
                    te.any(yint - 1 + i < 0, yint + i > image_height), 0.0,
                    wy[i])
            sum_wx = sum(wx)
            sum_wy = sum(wy)
            wx = [w / sum_wx for w in wx]
            wy = [w / sum_wy for w in wy]
        col0 = _cubic_kernel(p[0], wx)
        col1 = _cubic_kernel(p[1], wx)
        col2 = _cubic_kernel(p[2], wx)
        col3 = _cubic_kernel(p[3], wx)
        value = _cubic_kernel([col0, col1, col2, col3], wy)

    else:
        raise ValueError("Unknown resize method:", method)

    if coordinate_transformation_mode == "tf_crop_and_resize":
        out = tvm.tir.if_then_else(
            in_y < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_y > image_height - 1, extrapolation_value,
                                 value),
        )
        # use extrapolation_value if in_x is out of boundary
        value = tvm.tir.if_then_else(
            in_x < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value,
                                 out),
        )
    return _cast_output(value, data.dtype, out_dtype=out_dtype)
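
The "linear" branch above maps each output coordinate back into the input with the chosen coordinate_transformation_mode and blends a 2x2 neighborhood. As a hedged reference (plain numpy, assuming half_pixel mapping and edge-clamped reads; not part of the TVM source), the same computation looks like this:

import numpy as np

def reference_bilinear_resize_nchw(data, target_height, target_width):
    n, c, in_h, in_w = data.shape
    out = np.empty((n, c, target_height, target_width), dtype=data.dtype)
    for yy in range(target_height):
        for xx in range(target_width):
            # half_pixel coordinate transform
            in_y = (yy + 0.5) * in_h / target_height - 0.5
            in_x = (xx + 0.5) * in_w / target_width - 0.5
            y0, x0 = int(np.floor(in_y)), int(np.floor(in_x))
            y_lerp, x_lerp = in_y - y0, in_x - x0
            y0c, y1c = np.clip([y0, y0 + 1], 0, in_h - 1)
            x0c, x1c = np.clip([x0, x0 + 1], 0, in_w - 1)
            top = data[:, :, y0c, x0c] * (1 - x_lerp) + data[:, :, y0c, x1c] * x_lerp
            bottom = data[:, :, y1c, x0c] * (1 - x_lerp) + data[:, :, y1c, x1c] * x_lerp
            out[:, :, yy, xx] = top * (1 - y_lerp) + bottom * y_lerp
    return out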
Example #11
def _resize_1d(
    indices,
    data,
    roi,
    image_width,
    target_width,
    boxes=None,
    box_indices=None,
    method=None,
    extrapolation_value=0.0,
    layout="NCW",
    coordinate_transformation_mode="align_corners",
    rounding_method="",
    alpha=-0.5,
    exclude_outside=0,
    out_dtype=None,
):
    """Perform resize operation on the data with selected method and options.

    Parameters
    ----------
    indices : tuple
        The indices of input data

    data : tvm.te.Tensor
        inputs is a 3-D tensor with shape
        [batch, channel, in_width]
        or  [batch, in_width, channel]

    roi: Tuple of Float or Expr
        The region of interest for cropping the input image. Expected to be of
        size 2, and format [start_w, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.

    image_width : integer
        Input image width

    target_width : integer
        The target resized image width

    boxes : tvm.te.Tensor, optional
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.

    box_indices : tvm.te.Tensor, optional
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.

    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.

    layout: string, optional
        "NCW", "NWC", or "NCWc".

    method: string, optional
        method of interpolation ("nearest", "linear", "bicubic")

    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].

    rounding_method: string, optional
        indicates how to find the "nearest" pixel in nearest_neighbor method
        [round, floor, ceil]

    alpha: float, optional
        Bicubic spline coefficient

    exclude_outside: bool, optional
        Exclude values outside the image for bicubic interpolation


    out_dtype: string, optional
        Type to return. If left None will be same as input type.

    Returns
    -------
    output : out_dtype
        The computed result with type out_dtype
    """
    def _cast_output(value, data_dtype="float32", out_dtype=None):
        if out_dtype:
            dtype = out_dtype
        else:
            dtype = data_dtype
        return value.astype(dtype)

    n, c, x, cc, inum, ic = get_1d_indices(indices, layout)
    box_idx = box_indices(n) if box_indices is not None else n
    if boxes is not None:
        # TODO(mbrookhart): Find an example of this
        raise NotImplementedError(
            "resize1d with image boxes not yet implemented")
    in_x = get_inx(
        x,
        image_width,
        target_width,
        coordinate_transformation_mode,
        roi[0],
        roi[1],
    )

    if method == "nearest_neighbor":
        if rounding_method == "":
            if coordinate_transformation_mode == "align_corners":
                rounding_method = "round"
            else:
                rounding_method = "floor"

        closest_x_index = get_closest_index(in_x, rounding_method, boxes)

        value = get_1d_pixel(
            data,
            layout,
            image_width,
            box_idx,
            c,
            closest_x_index,
            cc,
            inum,
            ic,
        )
    elif method == "linear":
        x_int = te.floor(in_x).astype("int32")

        x_lerp = in_x - x_int

        p = [0 for i in range(2)]
        for i in range(2):
            p[i] = get_1d_pixel(
                data,
                layout,
                image_width,
                box_idx,
                c,
                x_int + i,
                cc,
                inum,
                ic,
            )

        value = _lerp(*p, x_lerp)

    elif method == "cubic":
        xint = te.floor(in_x).astype("int32")
        xfract = in_x - te.floor(in_x)

        # Get the surrounding values
        p = [0 for i in range(4)]
        for i in range(4):
            p[i] = get_1d_pixel(
                data,
                layout,
                image_width,
                box_idx,
                c,
                xint + i - 1,
                cc,
                inum,
                ic,
            )

        wx = _cubic_spline_weights(xfract, alpha)
        if exclude_outside:
            for i in range(4):
                wx[i] = te.if_then_else(
                    te.any(xint - 1 + i < 0, xint + i > image_width), 0.0,
                    wx[i])
            sum_wx = sum(wx)
            wx = [w / sum_wx for w in wx]
        value = _cubic_kernel(p, wx)

    else:
        raise ValueError("Unknown resize method:", method)

    if coordinate_transformation_mode == "tf_crop_and_resize":
        # use extrapolation_value if in_x is out of boundary
        value = tvm.tir.if_then_else(
            in_x < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value,
                                 value),
        )
    return _cast_output(value, data.dtype, out_dtype=out_dtype)
Example #12
def _resize_3d(
    indices,
    data,
    roi,
    image_depth,
    image_height,
    image_width,
    target_depth,
    target_height,
    target_width,
    boxes=None,
    box_indices=None,
    method=None,
    extrapolation_value=0.0,
    layout="NCHW",
    coordinate_transformation_mode="align_corners",
    rounding_method="",
    alpha=-0.5,
    exclude_outside=0,
    out_dtype=None,
):
    """Perform resize operation on the data with selected method and options.

    Parameters
    ----------
    indices : tuple
        The indices of input data

    data : tvm.te.Tensor
        inputs is a 5-D tensor with shape
        [batch, channel, in_depth, in_height, in_width]
        or  [batch, in_depth, in_height, in_width, channel]

    roi: Tuple of Float or Expr
        The region of interest for cropping the input image. Expected to be of
        size 6, and format [start_d, start_h, start_w, end_d, end_h, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.

    image_depth : integer
        Input image depth

    image_height : integer
        Input image height

    image_width : integer
        Input image width

    target_depth : integer
        The target resized image depth

    target_height : integer
        The target resized image height

    target_width : integer
        The target resized image width

    boxes : tvm.te.Tensor, optional
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.

    box_indices : tvm.te.Tensor, optional
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.

    method: string, optional
        method of interpolation ("nearest", "linear", "bicubic")

    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.

    layout: string, optional
        "NCHW", "NHWC", or "NCHWc".

    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].

    rounding_method: string, optional
        indicates how to find the "nearest" pixel in nearest_neighbor method
        [round, floor, ceil]

    alpha: float, optional
        Bicubic spline coefficient

    exclude_outside: bool, optional
        Exclude values outside the image for bicubic interpolation


    out_dtype: string, optional
        Type to return. If left None will be same as input type.

    Returns
    -------
    output : out_dtype
        The computed result with type out_dtype
    """
    def _cast_output(value, data_dtype="float32", out_dtype=None):
        if out_dtype:
            dtype = out_dtype
        else:
            dtype = data_dtype
        return value.astype(dtype)

    n, c, z, y, x, cc = get_3d_indices(indices, layout)
    box_idx = box_indices(n) if box_indices is not None else n
    if boxes is not None:
        # TODO(mbrookhart): Find an example of this
        raise NotImplementedError(
            "resize3d with image boxes not yet implemented")
    in_z = get_inx(z, image_depth, target_depth,
                   coordinate_transformation_mode, roi[2], roi[5])
    in_y = get_inx(y, image_height, target_height,
                   coordinate_transformation_mode, roi[1], roi[4])
    in_x = get_inx(x, image_width, target_width,
                   coordinate_transformation_mode, roi[0], roi[3])

    if method == "nearest_neighbor":
        if rounding_method == "":
            if coordinate_transformation_mode == "align_corners":
                rounding_method = "round"
            else:
                rounding_method = "floor"

        closest_z_index = get_closest_index(in_z, rounding_method, boxes)
        closest_y_index = get_closest_index(in_y, rounding_method, boxes)
        closest_x_index = get_closest_index(in_x, rounding_method, boxes)

        value = get_3d_pixel(
            data,
            layout,
            image_depth,
            image_height,
            image_width,
            box_idx,
            c,
            closest_z_index,
            closest_y_index,
            closest_x_index,
            cc,
        )
    elif method == "linear":
        z_int = te.floor(in_z).astype("int32")
        y_int = te.floor(in_y).astype("int32")
        x_int = te.floor(in_x).astype("int32")

        z_lerp = in_z - z_int
        y_lerp = in_y - y_int
        x_lerp = in_x - x_int

        p = [[[0 for i in range(2)] for j in range(2)] for k in range(2)]
        for k in range(2):
            for j in range(2):
                for i in range(2):
                    p[k][j][i] = get_3d_pixel(
                        data,
                        layout,
                        image_depth,
                        image_height,
                        image_width,
                        box_idx,
                        c,
                        z_int + k,
                        y_int + j,
                        x_int + i,
                        cc,
                    )
        l = [[0 for i in range(2)] for j in range(2)]
        for j in range(2):
            for i in range(2):
                l[j][i] = _lerp(*p[j][i], x_lerp)

        top = _lerp(*l[0], y_lerp)
        bottom = _lerp(*l[1], y_lerp)
        value = _lerp(top, bottom, z_lerp)

    elif method == "cubic":
        zint = te.floor(in_z).astype("int32")
        zfract = in_z - te.floor(in_z)

        yint = te.floor(in_y).astype("int32")
        yfract = in_y - te.floor(in_y)

        xint = te.floor(in_x).astype("int32")
        xfract = in_x - te.floor(in_x)

        # Get the surrounding values
        p = [[[0 for i in range(4)] for j in range(4)] for k in range(4)]
        for k in range(4):
            for j in range(4):
                for i in range(4):
                    p[k][j][i] = get_3d_pixel(
                        data,
                        layout,
                        image_depth,
                        image_height,
                        image_width,
                        box_idx,
                        c,
                        zint + k - 1,
                        yint + j - 1,
                        xint + i - 1,
                        cc,
                    )

        wz = _cubic_spline_weights(zfract, alpha)
        wy = _cubic_spline_weights(yfract, alpha)
        wx = _cubic_spline_weights(xfract, alpha)
        if exclude_outside:
            for i in range(4):
                wz[i] = te.if_then_else(
                    te.any(zint - 1 + i < 0, zint + i > image_depth), 0.0,
                    wz[i])
                wy[i] = te.if_then_else(
                    te.any(yint - 1 + i < 0, yint + i > image_height), 0.0,
                    wy[i])
                wx[i] = te.if_then_else(
                    te.any(xint - 1 + i < 0, xint + i > image_width), 0.0,
                    wx[i])
            sum_wz = sum(wz)
            sum_wy = sum(wy)
            sum_wx = sum(wx)
            wz = [w / sum_wz for w in wz]
            wy = [w / sum_wy for w in wy]
            wx = [w / sum_wx for w in wx]

        l = [[0 for i in range(4)] for j in range(4)]
        for j in range(4):
            for i in range(4):
                l[j][i] = _cubic_kernel(p[j][i], wx)
        col0 = _cubic_kernel(l[0], wy)
        col1 = _cubic_kernel(l[1], wy)
        col2 = _cubic_kernel(l[2], wy)
        col3 = _cubic_kernel(l[3], wy)
        value = _cubic_kernel([col0, col1, col2, col3], wz)

    else:
        raise ValueError("Unknown resize method:", method)

    if coordinate_transformation_mode == "tf_crop_and_resize":
        out = tvm.tir.if_then_else(
            in_z < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_z > image_depth - 1, extrapolation_value,
                                 value),
        )
        out = tvm.tir.if_then_else(
            in_y < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_y > image_height - 1, extrapolation_value,
                                 out),
        )
        # use extrapolation_value if in_x is out of boundary
        value = tvm.tir.if_then_else(
            in_x < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value,
                                 out),
        )
    return _cast_output(value, data.dtype, out_dtype=out_dtype)
Example #13
def test_basic_operation():
    np.random.seed(0)
    shape = (10, 10)
    x = te.var("x", dtype='float32')
    k = te.reduce_axis((0, 10), name="k")
    l = te.reduce_axis((0, 10), name="l")
    A0 = te.placeholder(shape, name='A0')
    A1 = te.placeholder(shape, name='A1')
    zeros = np.zeros(shape)

    B = te.compute(shape, lambda i, j: A0[i, j], name='B')
    check_grad(B, [A0])

    B = te.compute(shape, lambda i, j: A0[i, j] + A1[i, j], name='B')
    check_grad(B, [A0, A1])

    B = te.compute(shape, lambda i, j: A0[i, j] + A0[j, i], name='B')
    check_grad(B, A0)

    B = te.compute(shape, lambda i, j: te.floor(A0[i, j]), name='B')
    check_grad(B, A0, desired_grads=[zeros])

    B = te.compute(shape, lambda i, j: te.ceil(A0[i, j]), name='B')
    check_grad(B, A0, desired_grads=[zeros])

    B = te.compute(shape, lambda i, j: te.trunc(A0[i, j]), name='B')
    check_grad(B, A0, desired_grads=[zeros])

    B = te.compute(shape, lambda i, j: te.round(A0[i, j]), name='B')
    check_grad(B, A0, desired_grads=[zeros])

    B = te.compute(shape, lambda i, j: A0[i, j] + te.exp(A0[j, i]), name='B')
    check_grad(B, A0)

    B = te.compute(
        shape,
        lambda i, j: te.log(0.1 + te.abs(A0[i, j] + te.exp(A0[j, i]))),
        name='B')
    check_grad(B, A0)

    B = te.compute(shape,
                   lambda i, j: te.sigmoid(A0[i, j] * A0[i, j] * A0[j, i]),
                   name='B')
    check_grad(B, A0)

    B = te.compute(shape,
                   lambda i, j: te.tanh(A0[i, j] * A0[i, j] * A0[j, i]),
                   name='B')
    check_grad(B, A0)

    B = te.compute(shape,
                   lambda i, j: te.sqrt(A0[i, j] * A0[i, j] * A0[j, i]),
                   name='B')
    check_grad(B, A0, data_range=(0.1, 10))

    B = te.compute(shape,
                   lambda i, j: te.power(te.abs(A0[i, j]), A0[j, i]),
                   name='B')
    check_grad(B, A0, data_range=(-4, 4))

    B = te.compute(shape, lambda i, j: A0[i, j] * A0[j, i], name='B')
    check_grad(B, A0)

    B = te.compute((10, ),
                   lambda i: te.sum(A0[i, k] * A0[k, i], axis=k),
                   name='B')
    check_grad(B, A0)

    B = te.compute(shape,
                   lambda i, j: te.sum(A0[i, k] * A0[k, i] + 5, axis=k),
                   name='B')
    check_grad(B, A0)

    B = te.compute(shape,
                   lambda i, j: te.max(A0[i, k] * A0[k, j] + 5, axis=k),
                   name='B')
    check_grad(B, A0)

    B = te.compute(shape,
                   lambda i, j: A0[i, j] * (A1[j, i] + A0[j, i]),
                   name='B')
    check_grad(B, [A0, A1])

    B = te.compute(shape,
                   lambda i, j: te.sum(
                       A0[k, k] - A0[te.min(j + k, 9), j] * A0[i, k], axis=k),
                   name='B')
    check_grad(B, A0)

    def fcombine(x, y):
        return x * y

    def fidentity(t0):
        return tvm.tir.const(1, t0)

    prod = te.comm_reducer(fcombine, fidentity, name='prod')
    B = te.compute((10, 10),
                   lambda i, j: prod(A0[i, k] + A0[k, i], axis=k),
                   name='B')
    check_grad(B, A0)

    X = te.placeholder((10, ), name='X')
    A = te.compute((10, ), lambda i: X[i] + X[9 - i])
    B = te.compute((10, ), lambda i: X[i] * X[9 - i])
    Y = topi.tensordot(A, B, 1)
    check_grad(Y, X)
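
As a hedged sketch of what the floor/ceil/trunc/round cases above assert (check_grad itself is not shown on this page): differentiating te.floor with te.gradient and an all-ones head should yield a zero adjoint, because the op is piecewise constant. Target and shapes are illustrative assumptions.

import numpy as np
import tvm
from tvm import te

A0 = te.placeholder((10, 10), name="A0")
B = te.compute((10, 10), lambda i, j: te.floor(A0[i, j]), name="B")
head = te.compute(B.shape, lambda *i: tvm.tir.const(1.0, B.dtype), name="head")
[dA0] = te.gradient(B, [A0], head=head)

s = te.create_schedule(dA0.op)
f = tvm.build(s, [A0, dA0], target="llvm")
a = tvm.nd.array(np.random.uniform(-5, 5, (10, 10)).astype("float32"))
g = tvm.nd.array(np.empty((10, 10), dtype="float32"))
f(a, g)
np.testing.assert_allclose(g.numpy(), np.zeros((10, 10)))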
Example #14
def resize_nearest_neighbor(indices,
                            data,
                            image_height,
                            image_width,
                            target_height,
                            target_width,
                            boxes=None,
                            box_indices=None,
                            extrapolation_value=None,
                            layout='NCHW',
                            coordinate_transformation_mode="align_corners",
                            out_dtype=None):
    """Perform resize operation with nearest neighbor method on the data.
    For details about Nearest-neighbor interpolation please refer to
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.

    Parameters
    ----------
    indices : tuple
        The indices of input data

    data : tvm.te.Tensor
        inputs is a 4-D tensor with shape
        [batch, channel, in_height, in_width]
        or  [batch, in_height, in_width, channel]

    image_height : integer
        Input image height

    image_width : integer
        Input image width

    target_height : integer
        The target resized image height

    target_width : integer
        The target resized image width

    boxes : tvm.te.Tensor, optional
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.

    box_indices : tvm.te.Tensor, optional
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.

    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.

    layout: string, optional
        "NCHW", "NHWC", or "NCHWc".

    coordinate_transformation_mode: string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        Refer to the ONNX Resize operator specification for details.
        Available options are "half_pixel", "align_corners" and "asymmetric".

    out_dtype: string, optional
        Type to return. If left None will be same as input type.

    Returns
    -------
    output : out_dtype
        The computed result with type out_dtype
    """
    def _cast_output(value, data_dtype="float32", out_dtype=None):
        if out_dtype:
            dtype = out_dtype
        else:
            dtype = data_dtype
        return value.astype(dtype)

    n, c, y, x, cc, inum, ic = get_2d_indices(indices, layout)
    box_idx = box_indices(n) if box_indices is not None else n
    if boxes is not None:
        y1, x1 = boxes(n, 0), boxes(n, 1)
        y2, x2 = boxes(n, 2), boxes(n, 3)

        in_h = (image_height - 1) * (y2 - y1)
        in_w = (image_width - 1) * (x2 - x1)
        h_scale = in_h.astype('float') / (target_height - 1)
        w_scale = in_w.astype('float') / (target_width - 1)

        in_y = y1 * (image_height - 1) + h_scale * y
        in_x = x1 * (image_width - 1) + w_scale * x
    else:
        if coordinate_transformation_mode == "align_corners":
            h_scale = (image_height - 1).astype('float') / (target_height - 1)
            w_scale = (image_width - 1).astype('float') / (target_width - 1)
        elif coordinate_transformation_mode in ["asymmetric", "half_pixel"]:
            h_scale = image_height.astype('float') / target_height
            w_scale = image_width.astype('float') / target_width
        else:
            raise ValueError(
                "Unsupported coordinate_transformation_mode: {}".format(
                    coordinate_transformation_mode))
        in_y = h_scale * y
        in_x = w_scale * x

    if coordinate_transformation_mode == "align_corners" or boxes is not None:
        closest_x_index = te.round(in_x).astype("int32")
        closest_y_index = te.round(in_y).astype("int32")
    else:
        # Add epsilon to floor to prevent gpu rounding errors.
        epsilon = 1e-5
        closest_y_index = te.floor(in_y + epsilon).astype('int32')
        closest_x_index = te.floor(in_x + epsilon).astype('int32')

    value = get_2d_pixel(data, layout, boxes, image_height, image_width,
                         box_idx, c, closest_y_index, closest_x_index, cc,
                         inum, ic)

    if extrapolation_value is not None:
        out = tvm.tir.if_then_else(
            in_y < 0, extrapolation_value,
            tvm.tir.if_then_else(in_y > image_height - 1, extrapolation_value,
                                 value))
        # use extrapolation_value if in_x is out of boundary
        value = tvm.tir.if_then_else(
            in_x < 0, extrapolation_value,
            tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value,
                                 out))
    return _cast_output(value, data.dtype, out_dtype=out_dtype)
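
A plain-numpy reference (an illustration, not TVM source) of the align_corners branch above: scale output coordinates by (in - 1) / (out - 1) and round to the nearest input pixel, here written as floor(x + 0.5) since the coordinates are non-negative.

import numpy as np

def reference_resize_nearest_align_corners(data, target_height, target_width):
    # data is NCHW; assumes target_height, target_width > 1
    n, c, in_h, in_w = data.shape
    h_scale = (in_h - 1) / (target_height - 1)
    w_scale = (in_w - 1) / (target_width - 1)
    ys = np.floor(np.arange(target_height) * h_scale + 0.5).astype("int64")
    xs = np.floor(np.arange(target_width) * w_scale + 0.5).astype("int64")
    return data[:, :, ys[:, None], xs[None, :]]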
Example #15
def resize_bicubic(indices,
                   data,
                   image_height,
                   image_width,
                   target_height,
                   target_width,
                   boxes=None,
                   box_indices=None,
                   extrapolation_value=None,
                   layout='NCHW',
                   coordinate_transformation_mode="align_corners",
                   out_dtype=None):
    """Perform resize operation with bicubic method on the data.
    For more details about bicubic interpolation, please refer to
    https://en.wikipedia.org/wiki/Bicubic_interpolation.

    Parameters
    ----------
    indices : tuple
        The indices of input data

    data : tvm.te.Tensor
        inputs is a 4-D tensor with shape
        [batch, channel, in_height, in_width]
        or  [batch, in_height, in_width, channel]

    image_height : integer
        Input image height

    image_width : integer
        Input image width

    target_height : integer
        The target resized image height

    target_width : integer
        The target resized image width

    boxes : tvm.te.Tensor, optional
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.

    box_indices : tvm.te.Tensor, optional
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.

    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.

    layout: string, optional
        "NCHW", "NHWC", or "NCHWc".

    coordinate_transformation_mode: string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        Refer to the ONNX Resize operator specification for details.
        Available options are "half_pixel", "align_corners" and "asymmetric".

    out_dtype: string, optional
        Type to return. If left None will be same as input type.

    Returns
    -------
    output : out_dtype
        The computed result with type out_dtype
    """
    def _cubic_kernel(A, B, C, D, t):
        a = -A / 2.0 + (3.0 * B) / 2.0 - (3.0 * C) / 2.0 + D / 2.0
        b = A - (5.0 * B) / 2.0 + 2.0 * C - D / 2.0
        c = -A / 2.0 + C / 2.0
        d = B
        return a * t * t * t + b * t * t + c * t + d

    def _cast_output(value, data_dtype="float32", out_dtype=None):
        if out_dtype:
            dtype = out_dtype
        else:
            dtype = data_dtype
        return value.astype(dtype)

    n, c, y, x, cc, inum, ic = get_2d_indices(indices, layout)
    box_idx = box_indices(n) if box_indices is not None else n

    if boxes is not None:
        y1, x1 = boxes(n, 0), boxes(n, 1)
        y2, x2 = boxes(n, 2), boxes(n, 3)

        in_h = (image_height - 1) * (y2 - y1)
        in_w = (image_width - 1) * (x2 - x1)
        h_scale = in_h.astype('float') / (target_height - 1)
        w_scale = in_w.astype('float') / (target_width - 1)

        in_y = y1 * (image_height - 1) + h_scale * y
        in_x = x1 * (image_width - 1) + w_scale * x
    else:
        if coordinate_transformation_mode == "align_corners":
            h_scale = (image_height - 1).astype('float') / (target_height - 1)
            w_scale = (image_width - 1).astype('float') / (target_width - 1)
        elif coordinate_transformation_mode in ["asymmetric", "half_pixel"]:
            h_scale = image_height.astype('float') / target_height
            w_scale = image_width.astype('float') / target_width
        else:
            raise ValueError(
                "Unsupported coordinate_transformation_mode: {}".format(
                    coordinate_transformation_mode))

        if coordinate_transformation_mode == "half_pixel":
            in_y = h_scale * (y + 0.5) - 0.5
            in_x = w_scale * (x + 0.5) - 0.5
        else:
            in_y = h_scale * y
            in_x = w_scale * x

    xint = te.floor(in_x).astype('int32')
    xfract = in_x - te.floor(in_x)

    yint = te.floor(in_y).astype('int32')
    yfract = in_y - te.floor(in_y)

    # 1st row
    p00 = _get_pixel(data, layout, boxes, image_height, image_width, box_idx,
                     c, yint - 1, xint - 1, cc, inum, ic)
    p10 = _get_pixel(data, layout, boxes, image_height, image_width, box_idx,
                     c, yint - 1, xint + 0, cc, inum, ic)
    p20 = _get_pixel(data, layout, boxes, image_height, image_width, box_idx,
                     c, yint - 1, xint + 1, cc, inum, ic)
    p30 = _get_pixel(data, layout, boxes, image_height, image_width, box_idx,
                     c, yint - 1, xint + 2, cc, inum, ic)

    # 2nd row
    p01 = _get_pixel(data, layout, boxes, image_height, image_width, box_idx,
                     c, yint + 0, xint - 1, cc, inum, ic)
    p11 = _get_pixel(data, layout, boxes, image_height, image_width, box_idx,
                     c, yint + 0, xint + 0, cc, inum, ic)
    p21 = _get_pixel(data, layout, boxes, image_height, image_width, box_idx,
                     c, yint + 0, xint + 1, cc, inum, ic)
    p31 = _get_pixel(data, layout, boxes, image_height, image_width, box_idx,
                     c, yint + 0, xint + 2, cc, inum, ic)

    # 3rd row
    p02 = _get_pixel(data, layout, boxes, image_height, image_width, box_idx,
                     c, yint + 1, xint - 1, cc, inum, ic)
    p12 = _get_pixel(data, layout, boxes, image_height, image_width, box_idx,
                     c, yint + 1, xint + 0, cc, inum, ic)
    p22 = _get_pixel(data, layout, boxes, image_height, image_width, box_idx,
                     c, yint + 1, xint + 1, cc, inum, ic)
    p32 = _get_pixel(data, layout, boxes, image_height, image_width, box_idx,
                     c, yint + 1, xint + 2, cc, inum, ic)

    # 4th row
    p03 = _get_pixel(data, layout, boxes, image_height, image_width, box_idx,
                     c, yint + 2, xint - 1, cc, inum, ic)
    p13 = _get_pixel(data, layout, boxes, image_height, image_width, box_idx,
                     c, yint + 2, xint + 0, cc, inum, ic)
    p23 = _get_pixel(data, layout, boxes, image_height, image_width, box_idx,
                     c, yint + 2, xint + 1, cc, inum, ic)
    p33 = _get_pixel(data, layout, boxes, image_height, image_width, box_idx,
                     c, yint + 2, xint + 2, cc, inum, ic)

    # Interpolate bicubically
    col0 = _cubic_kernel(p00, p10, p20, p30, xfract)
    col1 = _cubic_kernel(p01, p11, p21, p31, xfract)
    col2 = _cubic_kernel(p02, p12, p22, p32, xfract)
    col3 = _cubic_kernel(p03, p13, p23, p33, xfract)
    value = _cubic_kernel(col0, col1, col2, col3, yfract)

    # use extrapolation_value if in_y/in_x is out of boundary
    if extrapolation_value is not None:
        out = tvm.tir.if_then_else(
            in_y < 0, extrapolation_value,
            tvm.tir.if_then_else(in_y > image_height - 1, extrapolation_value,
                                 value))
        value = tvm.tir.if_then_else(
            in_x < 0, extrapolation_value,
            tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value,
                                 out))
    return _cast_output(value, data.dtype, out_dtype=out_dtype)
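
A small sanity check (illustrative, not from the source) of the Catmull-Rom style _cubic_kernel above: at t = 0 it returns B and at t = 1 it returns C, so the interpolated curve passes through the two middle samples.

def cubic_kernel_ref(A, B, C, D, t):
    a = -A / 2.0 + (3.0 * B) / 2.0 - (3.0 * C) / 2.0 + D / 2.0
    b = A - (5.0 * B) / 2.0 + 2.0 * C - D / 2.0
    c = -A / 2.0 + C / 2.0
    d = B
    return a * t * t * t + b * t * t + c * t + d

assert cubic_kernel_ref(1.0, 2.0, 5.0, 7.0, 0.0) == 2.0
assert abs(cubic_kernel_ref(1.0, 2.0, 5.0, 7.0, 1.0) - 5.0) < 1e-12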
Example #16
    def _bicubic_sample(n, c, h, w):
        A = -0.75  # -0.75 is used in PyTorch; it may differ in other frameworks

        def cubic_weight_1(fraction):
            return ((A + 2) * fraction - (A + 3)) * fraction * fraction + 1

        def cubic_weight_2(fraction):
            return ((A * fraction - 5 * A) * fraction + 8 * A) * fraction - 4 * A

        def cubic_interp_1d(pixel_0, pixel_1, pixel_2, pixel_3, fraction):
            weights = [0] * 4
            weights[0] = cubic_weight_2(fraction + 1)
            weights[1] = cubic_weight_1(fraction)
            weights[2] = cubic_weight_1(1 - fraction)
            weights[3] = cubic_weight_2(2 - fraction)
            return (
                pixel_0 * weights[0]
                + pixel_1 * weights[1]
                + pixel_2 * weights[2]
                + pixel_3 * weights[3]
            )

        y = grid[n, 1, h, w]
        x = grid[n, 0, h, w]
        y, x = _unnormalize(y, x)
        y_floor = te.floor(y).astype("int32")
        x_floor = te.floor(x).astype("int32")
        y_fraction = y - y_floor
        x_fraction = x - x_floor

        coefficients = [0] * 4

        for i in range(4):
            y_ = y_floor - 1 + i
            x_0 = x_floor - 1
            x_1 = x_floor + 0
            x_2 = x_floor + 1
            x_3 = x_floor + 2

            if padding_mode == "border":
                y_ = _clip_coordinates(y_, in_height).astype("int32")
                x_0 = _clip_coordinates(x_0, in_width).astype("int32")
                x_1 = _clip_coordinates(x_1, in_width).astype("int32")
                x_2 = _clip_coordinates(x_2, in_width).astype("int32")
                x_3 = _clip_coordinates(x_3, in_width).astype("int32")

            elif padding_mode == "reflection":
                y_ = _reflect_coordinates(y_, in_height)
                x_0 = _reflect_coordinates(x_0, in_width)
                x_1 = _reflect_coordinates(x_1, in_width)
                x_2 = _reflect_coordinates(x_2, in_width)
                x_3 = _reflect_coordinates(x_3, in_width)

                y_ = _clip_coordinates(y_, in_height).astype("int32")
                x_0 = _clip_coordinates(x_0, in_width).astype("int32")
                x_1 = _clip_coordinates(x_1, in_width).astype("int32")
                x_2 = _clip_coordinates(x_2, in_width).astype("int32")
                x_3 = _clip_coordinates(x_3, in_width).astype("int32")

            coefficients[i] = cubic_interp_1d(
                _get_pixel_value(n, c, y_, x_0),
                _get_pixel_value(n, c, y_, x_1),
                _get_pixel_value(n, c, y_, x_2),
                _get_pixel_value(n, c, y_, x_3),
                x_fraction,
            )

        return cubic_interp_1d(
            coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_fraction
        )
Example #17
def resize_bicubic(
    indices,
    data,
    image_height,
    image_width,
    target_height,
    target_width,
    boxes=None,
    box_indices=None,
    extrapolation_value=None,
    layout="NCHW",
    coordinate_transformation_mode="align_corners",
    out_dtype=None,
    alpha=-0.5,
    exclude_outside=0,
):
    """Perform resize operation with bicubic method on the data.
    For more details about bicubic interpolation, please refer to
    https://en.wikipedia.org/wiki/Bicubic_interpolation.
    This algorithm performs bicubic spline interpolation.

    Parameters
    ----------
    indices : tuple
        The indices of input data

    data : tvm.te.Tensor
        inputs is a 4-D tensor with shape
        [batch, channel, in_height, in_width]
        or  [batch, in_height, in_width, channel]

    image_height : integer
        Input image height

    image_width : integer
        Input image width

    target_height : integer
        The target resized image height

    target_width : integer
        The target resized image width

    boxes : tvm.te.Tensor, optional
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.

    box_indices : tvm.te.Tensor, optional
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.

    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.

    layout: string, optional
        "NCHW", "NHWC", or "NCHWc".

    coordinate_transformation_mode: string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        Refer to the ONNX Resize operator specification for details.
        Available options are "half_pixel", "align_corners" and "asymmetric".

    out_dtype: string, optional
        Type to return. If left None will be same as input type.

    alpha: float, optional
        Bicubic spline coefficient

    Returns
    -------
    output : out_dtype
        The computed result with type out_dtype
    """
    def _cast_output(value, data_dtype="float32", out_dtype=None):
        if out_dtype:
            dtype = out_dtype
        else:
            dtype = data_dtype
        return value.astype(dtype)

    n, c, y, x, cc, inum, ic = get_2d_indices(indices, layout)
    box_idx = box_indices(n) if box_indices is not None else n

    if boxes is not None:
        y1, x1 = boxes(n, 0), boxes(n, 1)
        y2, x2 = boxes(n, 2), boxes(n, 3)

        in_h = (image_height - 1) * (y2 - y1)
        in_w = (image_width - 1) * (x2 - x1)
        h_scale = in_h.astype("float") / (target_height - 1)
        w_scale = in_w.astype("float") / (target_width - 1)

        in_y = y1 * (image_height - 1) + h_scale * y
        in_x = x1 * (image_width - 1) + w_scale * x
    else:
        in_y, in_x = get_iny_inx(
            y,
            x,
            image_height,
            image_width,
            target_height,
            target_width,
            coordinate_transformation_mode,
        )

    xint = te.floor(in_x).astype("int32")
    xfract = in_x - te.floor(in_x)

    yint = te.floor(in_y).astype("int32")
    yfract = in_y - te.floor(in_y)

    # Get the surrounding values
    p = [[0 for i in range(4)] for j in range(4)]
    for j in range(4):
        for i in range(4):
            p[j][i] = get_2d_pixel(
                data,
                layout,
                boxes,
                image_height,
                image_width,
                box_idx,
                c,
                yint + j - 1,
                xint + i - 1,
                cc,
                inum,
                ic,
            )

    # Interpolate bicubically
    def _cubic_spline_weights(t):
        t2 = t * t
        t3 = t * t * t
        w1 = alpha * (t3 - 2 * t2 + t)
        w2 = (alpha + 2) * t3 - (3 + alpha) * t2 + 1
        w3 = -(alpha + 2) * t3 + (3 + 2 * alpha) * t2 - alpha * t
        w4 = -alpha * t3 + alpha * t2
        return [w1, w2, w3, w4]

    def _cubic_kernel(inputs, w):
        return sum([a_i * w_i for a_i, w_i in zip(inputs, w)])

    wx = _cubic_spline_weights(xfract)
    wy = _cubic_spline_weights(yfract)
    if exclude_outside:
        for i in range(4):
            wx[i] = te.if_then_else(
                te.any(xint - 1 + i < 0, xint + i > image_width), 0.0, wx[i])
            wy[i] = te.if_then_else(
                te.any(yint - 1 + i < 0, yint + i > image_height), 0.0, wy[i])
        sum_wx = sum(wx)
        sum_wy = sum(wy)
        wx = [w / sum_wx for w in wx]
        wy = [w / sum_wy for w in wy]
    col0 = _cubic_kernel(p[0], wx)
    col1 = _cubic_kernel(p[1], wx)
    col2 = _cubic_kernel(p[2], wx)
    col3 = _cubic_kernel(p[3], wx)
    value = _cubic_kernel([col0, col1, col2, col3], wy)

    # use extrapolation_value if in_y/in_x is out of boundary
    if extrapolation_value is not None:
        out = tvm.tir.if_then_else(
            in_y < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_y > image_height - 1, extrapolation_value,
                                 value),
        )
        value = tvm.tir.if_then_else(
            in_x < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value,
                                 out),
        )
    return _cast_output(value, data.dtype, out_dtype=out_dtype)
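
A quick numpy illustration (an assumption for exposition, mirroring _cubic_spline_weights above with the default alpha = -0.5): the four spline weights always sum to 1, which is why exclude_outside can simply renormalize by their sum after zeroing out-of-image taps.

import numpy as np

def cubic_spline_weights_ref(t, alpha=-0.5):
    t2, t3 = t * t, t * t * t
    w1 = alpha * (t3 - 2 * t2 + t)
    w2 = (alpha + 2) * t3 - (3 + alpha) * t2 + 1
    w3 = -(alpha + 2) * t3 + (3 + 2 * alpha) * t2 - alpha * t
    w4 = -alpha * t3 + alpha * t2
    return np.stack([w1, w2, w3, w4])

t = np.linspace(0.0, 1.0, 11)
np.testing.assert_allclose(cubic_spline_weights_ref(t).sum(axis=0), np.ones_like(t), rtol=1e-6)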
Example #18
def resize_bilinear(
    indices,
    data,
    image_height,
    image_width,
    target_height,
    target_width,
    boxes=None,
    box_indices=None,
    extrapolation_value=None,
    layout="NCHW",
    coordinate_transformation_mode="align_corners",
    out_dtype=None,
):
    """Perform resize operation with bilinear method on the data.
    For details about Bilinear interpolation please refer to
    https://en.wikipedia.org/wiki/Bilinear_interpolation.

    Parameters
    ----------
    indices : tuple
        The indices of input data

    data : tvm.te.Tensor
        inputs is a 4-D tensor with shape
        [batch, channel, in_height, in_width]
        or  [batch, in_height, in_width, channel]

    image_height : integer
        Input image height

    image_width : integer
        Input image width

    target_height : integer
        The target resized image height

    target_width : integer
        The target resized image width

    boxes : tvm.te.Tensor, optional
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.

    box_indices : tvm.te.Tensor, optional
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.

    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.

    layout: string, optional
        "NCHW", "NHWC", or "NCHWc".

    coordinate_transformation_mode: string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        Refer to the ONNX Resize operator specification for details.
        Available options are "half_pixel", "align_corners" and "asymmetric".

    out_dtype: string, optional
        Type to return. If left None will be same as input type.

    Returns
    -------
    output : out_dtype
        The computed result with type out_dtype
    """
    def _cast_output(value, data_dtype="float32", out_dtype=None):
        if out_dtype:
            dtype = out_dtype
        else:
            dtype = data_dtype
        return value.astype(dtype)

    def _lerp(A, B, t):
        return A * (1.0 - t) + B * t

    n, c, y, x, cc, inum, ic = get_2d_indices(indices, layout=layout)
    box_idx = box_indices(n) if box_indices is not None else n

    if boxes is not None:
        y1, x1 = boxes(n, 0), boxes(n, 1)
        y2, x2 = boxes(n, 2), boxes(n, 3)

        in_h = (image_height - 1) * (y2 - y1)
        in_w = (image_width - 1) * (x2 - x1)
        h_scale = in_h.astype("float") / (target_height - 1)
        w_scale = in_w.astype("float") / (target_width - 1)

        in_y = y1 * (image_height - 1) + h_scale * y
        in_x = x1 * (image_width - 1) + w_scale * x
    else:
        in_y, in_x = get_iny_inx(
            y,
            x,
            image_height,
            image_width,
            target_height,
            target_width,
            coordinate_transformation_mode,
        )

    top_y_index = te.floor(in_y).astype("int32")
    bottom_y_index = te.ceil(in_y).astype("int32")
    y_lerp = in_y - top_y_index

    left_x_index = te.floor(in_x).astype("int32")
    right_x_index = te.ceil(in_x).astype("int32")
    x_lerp = in_x - left_x_index

    top_left = get_2d_pixel(
        data,
        layout,
        boxes,
        image_height,
        image_width,
        box_idx,
        c,
        top_y_index,
        left_x_index,
        cc,
        inum,
        ic,
    )
    top_right = get_2d_pixel(
        data,
        layout,
        boxes,
        image_height,
        image_width,
        box_idx,
        c,
        top_y_index,
        right_x_index,
        cc,
        inum,
        ic,
    )
    bottom_left = get_2d_pixel(
        data,
        layout,
        boxes,
        image_height,
        image_width,
        box_idx,
        c,
        bottom_y_index,
        left_x_index,
        cc,
        inum,
        ic,
    )
    bottom_right = get_2d_pixel(
        data,
        layout,
        boxes,
        image_height,
        image_width,
        box_idx,
        c,
        bottom_y_index,
        right_x_index,
        cc,
        inum,
        ic,
    )

    top = _lerp(top_left, top_right, x_lerp)
    bottom = _lerp(bottom_left, bottom_right, x_lerp)
    value = _lerp(top, bottom, y_lerp)

    # use extrapolation_value if in_y/in_x is out of boundary
    if extrapolation_value is not None:
        out = tvm.tir.if_then_else(
            in_y < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_y > image_height - 1, extrapolation_value,
                                 value),
        )
        value = tvm.tir.if_then_else(
            in_x < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value,
                                 out),
        )
    return _cast_output(value, data.dtype, out_dtype=out_dtype)