Пример #1
0
def minimum(input1, input2):
    """
    Return the min value of two tensors element-wise.

    Note:
        minimum supports broadcasting.

    Args:
        input1: Tensor.
        input2: Tensor. Has the same type as input1.

    Returns:
        Tensor, has the same type as inputs.
    """
    vc_util.ops_dtype_check([input1.dtype, input2.dtype], vc_util.DtypeForDavinci.ALL_TYPES)
    vc_util.elemwise_dtype_check(input1.dtype, input2.dtype)
    orig_dtype = input1.dtype

    lhs_shape = [dim.value for dim in input1.shape]
    rhs_shape = [dim.value for dim in input2.shape]
    for shape in (lhs_shape, rhs_shape):
        vc_util.check_shape(shape)
    vc_util.auto_broadcast_check(lhs_shape, rhs_shape)

    # Small integer types are not supported directly: compute in float16
    # and cast the result back afterwards.
    needs_cast = orig_dtype in ("int8", "uint8")
    if needs_cast:
        input1 = cast(input1, "float16")
        input2 = cast(input2, "float16")
    res = akg.topi.minimum(input1, input2)
    return cast(res, orig_dtype) if needs_cast else res
Пример #2
0
def pow(data1, data2):
    """
    Computes power(data1,data2) elementwise, broadcast is supported.

    Args:
        data1 (tvm.tensor.Tensor): Tensor.
        data2 (tvm.tensor.Tensor): Tensor of same type as data1, if shape(data2) != shape(data1), broadcast will happen.

    Returns:
        tvm.tensor.Tensor, powered result, with same type as input tensors and broadcasted shape of data1 and data2.
    """
    vc_util.elemwise_dtype_check(data1.dtype, data2.dtype)
    for tensor in (data1, data2):
        vc_util.check_shape(tensor.shape)
    vc_util.auto_broadcast_check(data1.shape, data2.shape)

    # float16 inputs are upcast to float32 for precision, then cast back.
    orig_dtype = data1.dtype
    needs_upcast = orig_dtype == 'float16'
    if needs_upcast:
        data1 = akg.topi.cast(data1, 'float32')
        data2 = akg.topi.cast(data2, 'float32')
    result = akg.topi.power(data1, data2)
    return akg.topi.cast(result, 'float16') if needs_upcast else result
Пример #3
0
def select(condition, x1, x2):
    """
    Selects elements from x1 or x2, depending on condition.

    Note:
        every parmas' shape need legal, can support condition's shape broadcast.

    Args:
        condition (tvm.tensor.Tensor): Tensor of type int8, int32, must be 0 or 1.
        x1 (tvm.tensor.Tensor): Tensor of type float16, float32, int8, int32, uint8.
        x2 (tvm.tensor.Tensor): Tensor of type float16, float32, int8, int32, uint8.

    Returns:
        tvm.tensor.Tensor, has the same type and shape as x1.

    """
    # Validate dtypes first: x1/x2 must match one of the supported types,
    # and the condition tensor must be an integer mask.
    vc_util.elemwise_dtype_check(x1.dtype, x2.dtype, [
        vc_util.DtypeForDavinci.ALL_FLOAT, vc_util.DtypeForDavinci.INT8,
        vc_util.DtypeForDavinci.INT32, vc_util.DtypeForDavinci.UINT8
    ])
    vc_util.ops_dtype_check(
        condition.dtype,
        [vc_util.DtypeForDavinci.INT8, vc_util.DtypeForDavinci.INT32])

    # x1 and x2 must agree exactly; the condition may broadcast against x1.
    x1_shape = get_shape(x1)
    x2_shape = get_shape(x2)
    cond_shape = get_shape(condition)
    vc_util.elemwise_shape_check(x1_shape, x2_shape)
    vc_util.auto_broadcast_check(cond_shape, x1_shape)

    return select_compute(condition, x1, x2)
Пример #4
0
def mul(l_input, r_input):
    """
    Calculate x * y element-wise.

    Note:
        mul supports broadcasting.

    Args:
        l_input (tvm.tensor.Tensor): Tensor of type float16, float32.
        r_input (tvm.tensor.Tensor): Tensor of type float16, float32.

    Returns:
        tvm.tensor.Tensor, has the same type as l_input and r_input.
    """
    vc_util.ops_dtype_check([l_input.dtype, r_input.dtype],
                            vc_util.DtypeForDavinci.ALL_FLOAT)

    lhs_shape = [dim.value for dim in l_input.shape]
    rhs_shape = [dim.value for dim in r_input.shape]
    for shape in (lhs_shape, rhs_shape):
        vc_util.check_shape(shape)
    vc_util.auto_broadcast_check(lhs_shape, rhs_shape)
    vc_util.elemwise_dtype_check(l_input.dtype, r_input.dtype)

    return akg.topi.multiply(l_input, r_input)
Пример #5
0
def div(data1, data2):
    """
    Calculates x/y, and returns an integer when inputs are all integers.

    When both arguments are integers, use integer division (also known as "floor division").
    When arguments are float numbers, use normal floating point division

    Note:
        div supports broadcasting.

    Args:
        data1 (tvm.tensor.Tensor): Tensor of type float16, float32, int32, int8 and uint8.
        data2 (tvm.tensor.Tensor): Tensor of type float16, float32, int32, int8 and uint8.

    Returns:
        tvm.tensor.Tensor, has the same type as data1 and data2.
    """
    vc_util.ops_dtype_check([data1.dtype, data2.dtype],
                            vc_util.DtypeForDavinci.ALL_TYPES)
    vc_util.elemwise_dtype_check(data1.dtype, data2.dtype)
    orig_dtype = data1.dtype

    lhs_shape = [dim.value for dim in data1.shape]
    rhs_shape = [dim.value for dim in data2.shape]
    vc_util.check_shape(lhs_shape)
    vc_util.check_shape(rhs_shape)
    vc_util.auto_broadcast_check(lhs_shape, rhs_shape)

    # Expand either side explicitly when it does not already match the
    # broadcasted output shape.
    n_shape1, n_shape2, out_shape = produce_shapes(lhs_shape, rhs_shape)
    lhs = data1 if n_shape1 == out_shape else akg.topi.broadcast_to(data1, out_shape)
    rhs = data2 if n_shape2 == out_shape else akg.topi.broadcast_to(data2, out_shape)

    # Integer inputs are computed in float16, then floored/cast back below.
    is_int = orig_dtype in ("int32", "int8", "uint8")
    if is_int:
        lhs = cast(lhs, "float16")
        rhs = cast(rhs, "float16")

    # Mini product has no native divide; emulate with multiply-by-reciprocal.
    if utils.product_is_mini():
        res = akg.topi.multiply(lhs, reciprocal(rhs))
    else:
        res = akg.topi.divide(lhs, rhs)

    if orig_dtype in ("int8", "uint8"):
        res = cast(floor(res), "float16")
    if is_int:
        res = cast(res, orig_dtype)

    return res
def fake_quant_with_min_max_vars_per_channel(input_data,
                                             input_min,
                                             input_max,
                                             num_bits=8,
                                             narrow_range=False):
    """
    Generate fake_quantize the input_data for every channel.

    Note:
        For input_data last dim must be equal to d. And need to satisfy: input_min <= 0 <= input_max.

    Args:
        input_data (tvm.tensor.Tensor): Tensor of type float32, shape must be equal to [b, d] or [b, h, w, d] or [d].
        input_min (tvm.tensor.Tensor): Tensor of type float32, shape must be equal to [d].
        input_max (tvm.tensor.Tensor): Tensor of type float32, shape must be equal to [d].
        num_bits (int):  The quantization bits, must be int, defaults to 8.
        narrow_range (Union[bool, None]): if True, quant_min equal to 1, else 0, defaults to False.

    Returns:
        tvm.tensor.Tensor of same type and shape as input_data.

    Raises:
        RuntimeError: If the last dimension of input_data does not match input_min/input_max.
        ValueError: If num_bits is outside the range [2, 16].
    """

    # get shape and check
    shape_inputs = get_shape(input_data)
    shape_min = get_shape(input_min)
    shape_max = get_shape(input_max)
    vc_util.elemwise_shape_check(shape_min, shape_max)
    vc_util.auto_broadcast_check(shape_min, shape_inputs)
    # min/max are per-channel, so their length must equal the channel dim.
    if shape_min[0] != shape_inputs[-1]:
        raise RuntimeError(
            "The shapes of min,max and shape_inputs last one dimension should be same!"
        )

    # check dtype
    vc_util.ops_dtype_check(input_data.dtype, vc_util.DtypeForDavinci.FLOAT32)
    vc_util.elemwise_dtype_check(input_min.dtype, input_max.dtype,
                                 vc_util.DtypeForDavinci.FLOAT32)
    # check num_bits range (fix: message previously misspelled "numbits")
    if num_bits > 16 or num_bits < 2:
        raise ValueError("num_bits should be in range [2, 16]!")

    # get output by fake_quant_with_min_max_vars_per_channel_compute function
    res = fake_quant_with_min_max_vars_per_channel_compute(
        input_data, input_min, input_max, num_bits, narrow_range)
    return res
Пример #7
0
def sub(data1, data2):
    """
    Computes data1 - data2 elementwise, broadcast is supported.

    Args:
        data1 (tvm.tensor.Tensor): Tensor.
        data2 (tvm.tensor.Tensor): Tensor of same type as data1, if shape(data2) != shape(data1), broadcast will happen.

    Returns:
        tvm.tensor.Tensor, subtracted result, with same type as input tensors and broadcasted shape of data1 and data2.
    """
    vc_util.elemwise_dtype_check(data1.dtype, data2.dtype)
    for tensor in (data1, data2):
        vc_util.check_shape(tensor.shape)
    vc_util.auto_broadcast_check(data1.shape, data2.shape)

    return akg.topi.subtract(data1, data2)
Пример #8
0
def maximum(data1, data2):
    """
    Take element-wise maximum of two tensors with auto-broadcasting.

    Args:
        data1: tvm.tensor.Tensor
        data2: tvm.tensor.Tensor

    Returns:
        tvm.tensor.Tensor of maximum of two tensors.
    """
    lhs_shape = [dim.value for dim in data1.shape]
    rhs_shape = [dim.value for dim in data2.shape]
    for shape in (lhs_shape, rhs_shape):
        vc_util.check_shape(shape)
    vc_util.auto_broadcast_check(lhs_shape, rhs_shape)
    vc_util.elemwise_dtype_check(data1.dtype, data2.dtype)

    return topi.maximum(data1, data2)
Пример #9
0
def divide(lhs, rhs):
    """
    Calculate divide.

    Args:
        lhs: The left tensor.
        rhs: The right tensor.

    Returns:
        tvm.tensor.Tensor.
    """
    left_shape = [dim.value for dim in lhs.shape]
    right_shape = [dim.value for dim in rhs.shape]
    for shape in (left_shape, right_shape):
        vc_util.check_shape(shape)
    vc_util.auto_broadcast_check(left_shape, right_shape)
    vc_util.elemwise_dtype_check(lhs.dtype, rhs.dtype)

    return akg.topi.divide(lhs, rhs)
Пример #10
0
def pow_value(data, scale):
    """
    Computes power(data, scale) elementwise, broadcast is supported.

    Args:
        data (tvm.tensor.Tensor): Base tensor of type float16, float32, int32, int8 or uint8.
        scale (tvm.tensor.Tensor): Exponent tensor; broadcast with data is supported.

    Returns:
        tvm.tensor.Tensor, data raised to the power scale, cast back to data's dtype.

    Raises:
        RuntimeError: If data's dtype is not in the supported list.
    """
    shape1 = [x.value for x in data.shape]
    shape2 = [x.value for x in scale.shape]

    check_list = ["float16", "float32", "int32", "int8", "uint8"]
    dtype = data.dtype
    if dtype.lower() not in check_list:
        # Fix: the original message named "tile_cce" (copy-paste from another op).
        raise RuntimeError("pow_value only support %s while dtype is %s" %
                           (",".join(check_list), dtype))

    # Validate both input shapes (the original only checked data's shape,
    # and recomputed it redundantly).
    vc_util.check_shape(shape1)
    vc_util.check_shape(shape2)
    vc_util.auto_broadcast_check(shape1, shape2)

    # Compute in float32 where available; mini product only supports float16.
    compute_dtype = "float16" if utils.product_is_mini() else "float32"
    data = cast(data, compute_dtype)
    scale = cast(scale, compute_dtype)

    res = akg.topi.power(data, scale)
    return cast(res, dtype)
Пример #11
0
def sub(data1, data2):
    """
    Computes data1 - data2 elementwise, broadcast is supported.

    Args:
        data1 (tvm.tensor.Tensor): Tensor of type float16, float32, int32.
        data2 (tvm.tensor.Tensor): Tensor of same type as data1, if shape(data2) != shape(data1), broadcast will happen.

    Returns:
        tvm.tensor.Tensor, subtracted result, with same type as input tensors and broadcasted shape of data1 and data2.
    """
    # Only float and int32 dtypes are supported, and the two must agree.
    vc_util.ops_dtype_check([data1.dtype, data2.dtype],
                            [vc_util.DtypeForDavinci.ALL_FLOAT, vc_util.DtypeForDavinci.INT32])
    vc_util.elemwise_dtype_check(data1.dtype, data2.dtype)

    for tensor in (data1, data2):
        vc_util.check_shape(tensor.shape)
    vc_util.auto_broadcast_check(data1.shape, data2.shape)

    return akg.topi.subtract(data1, data2)
Пример #12
0
def realdiv(input1, input2):
    """
    Returns input1 / input2 element-wise for real types.

    Note:
        Realdiv supports broadcasting.

    Args:
        input1 (tvm.tensor.Tensor): Tensor of type float16, float32.
        input2 (tvm.tensor.Tensor): Tensor of type float16, float32.

    Returns:
        tvm.tensor.Tensor, has the same type of input1 and shaped by broadcasting.
    """
    vc_util.ops_dtype_check([input1.dtype, input2.dtype],
                            vc_util.DtypeForDavinci.ALL_FLOAT)
    vc_util.elemwise_dtype_check(input1.dtype, input2.dtype)

    lhs_shape = [dim.value for dim in input1.shape]
    rhs_shape = [dim.value for dim in input2.shape]
    for shape in (lhs_shape, rhs_shape):
        vc_util.check_shape(shape)
    vc_util.auto_broadcast_check(lhs_shape, rhs_shape)

    # Explicitly expand either operand that does not already have the
    # broadcasted output shape.
    n_shape1, n_shape2, out_shape = produce_shapes(lhs_shape, rhs_shape)
    numerator = input1 if n_shape1 == out_shape else akg.topi.broadcast_to(input1, out_shape)
    denominator = input2 if n_shape2 == out_shape else akg.topi.broadcast_to(input2, out_shape)

    return akg.topi.divide(numerator, denominator)