Example #1
def xdivy_grad_compute(placeholders, shape_max, dtype, rx, ry):
    """
    Do element-wise xdivy_grad compute.

    Args:
        placeholders (Union[list, tuple]): the placeholders of the input data
        shape_max (Union[list, tuple]): the broadcast shape
        dtype (string): the type of data input
        rx (list): the reduction indices of data input with broadcast
        ry (list): the reduction indices for data input with broadcast

    Returns:
        output_y1 (tvm.tensor.Tensor): result of xdivy_grad
        output_y2 (tvm.tensor.Tensor): result of xdivy_grad
    """
    x1_ori = placeholders[0]
    x2_ori = placeholders[1]
    grad_ori = placeholders[2]

    if dtype == "float16":
        x1 = akg.lang.cce.cast_to(x1_ori, "float32")
        x2 = akg.lang.cce.cast_to(x2_ori, "float32")
        grad = akg.lang.cce.cast_to(grad_ori, "float32")
        x1 = akg.lang.cce.broadcast(x1, shape_max)
        x2 = akg.lang.cce.broadcast(x2, shape_max)
        grad = akg.lang.cce.broadcast(grad, shape_max)
    else:
        x1 = akg.lang.cce.broadcast(x1_ori, shape_max)
        x2 = akg.lang.cce.broadcast(x2_ori, shape_max)
        grad = akg.lang.cce.broadcast(grad_ori, shape_max)

    esp_min = tvm.const(1.18e-38, dtype="float32")
    x1_addepsmin = akg.lang.cce.vadds(x1, esp_min)

    if utils.product_is_mini():
        x1_addepsmin_rec = reciprocal(x1_addepsmin)
        not_zero_x1 = akg.lang.cce.vmul(x1, x1_addepsmin_rec)
        x2_rec = reciprocal(x2)
        partial_x1 = akg.lang.cce.vmul(not_zero_x1, x2_rec)
    else:
        not_zero_x1 = div(x1, x1_addepsmin)
        partial_x1 = div(not_zero_x1, x2)

    partial_x1g = akg.lang.cce.vmul(partial_x1, grad)

    neg_one = tvm.const(-1, dtype="float32")
    neg_x1 = akg.lang.cce.vmuls(x1, neg_one)
    partial_x1pow = akg.lang.cce.vmul(partial_x1, partial_x1)
    partial_x2 = akg.lang.cce.vmul(neg_x1, partial_x1pow)
    partial_x2g = akg.lang.cce.vmul(partial_x2, grad)

    output_y1 = akg.lang.cce.sum(partial_x1g, rx, keepdims=True)
    output_y2 = akg.lang.cce.sum(partial_x2g, ry, keepdims=True)

    if dtype == "float16":
        output_y1 = akg.lang.cce.cast_to(output_y1, "float16")
        output_y2 = akg.lang.cce.cast_to(output_y2, "float16")

    return output_y1, output_y2
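For a quick sanity check of the gradient math above, here is a minimal NumPy sketch of the same formula; the helper name and the use of NumPy are illustrative assumptions, not part of akg.

import numpy as np

def xdivy_grad_reference(x1, x2, grad, rx, ry):
    # Hypothetical NumPy sketch of the formula above, not part of akg.
    eps = 1.18e-38
    not_zero_x1 = x1 / (x1 + eps)               # ~0 where x1 == 0, ~1 elsewhere
    partial_x1 = not_zero_x1 / x2               # d(xdivy)/dx1
    partial_x2 = -x1 * partial_x1 * partial_x1  # d(xdivy)/dx2 = -x1 / x2^2 (masked)
    dy1 = np.sum(partial_x1 * grad, axis=tuple(rx), keepdims=True)
    dy2 = np.sum(partial_x2 * grad, axis=tuple(ry), keepdims=True)
    return dy1, dy2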
Example #2
def apply_proximal_gradient_descent_impl(var, alpha, l1, l2, delta):
    """implement the FOBOS algorithm"""

    dtype = var.dtype
    compute_type = dtype
    if dtype == "float16":
        # cast to float32 for higher accuracy
        compute_type = "float32"
        var, alpha, l1, l2, delta = [
            akg.topi.cast(t, compute_type)
            for t in [var, alpha, l1, l2, delta]
        ]

    shape = var.shape
    alpha = akg.topi.broadcast_to(alpha, shape)
    l1 = akg.topi.broadcast_to(l1, shape)
    l2 = akg.topi.broadcast_to(l2, shape)
    # prox_var = var - alpha * delta
    prox_var = akg.tvm.compute(
        shape,
        lambda *indice: var(*indice) - alpha(*indice) * delta(*indice),
        name="prox_var")

    # l1>0: var_new = sign(prox_var)/(1+alpha*l2) * max{|prox_var|-alpha*l1,0}
    sign_prox_var = sign(prox_var)
    alpha_l2_1 = akg.topi.add(akg.tvm.const(1, compute_type),
                              akg.topi.multiply(alpha, l2))
    max_value = akg.tvm.compute(
        shape,
        lambda *indice: akg.tvm.max(
            akg.tvm.abs(prox_var(*indice)) - alpha(*indice) * l1(*indice),
            akg.tvm.const(0, compute_type)),
        name="max_value")
    var_new_l1_gt_0 = akg.topi.multiply(div(sign_prox_var, alpha_l2_1),
                                        max_value)

    # l1<=0: var_new = prox_var/(1+alpha*l2)
    var_new_l1_le_0 = div(prox_var, alpha_l2_1)

    if utils.product_is_mini():
        var_new = akg.tvm.compute(
            shape,
            lambda *indice: akg.tvm.expr.Select(
                l1(*indice).astype("float16") > akg.tvm.const(0, "float16"),
                var_new_l1_gt_0(*indice).astype("float16"),
                var_new_l1_le_0(*indice).astype("float16")),
            name="var_new")
    else:
        var_new = akg.tvm.compute(
            shape,
            lambda *indice: akg.tvm.expr.Select(
                l1(*indice) > akg.tvm.const(0, l1.dtype),
                var_new_l1_gt_0(*indice), var_new_l1_le_0(*indice)),
            name="var_new")

    # cast to origin dtype
    if var_new.dtype != dtype:
        var_new = akg.topi.cast(var_new, dtype)
    return var_new
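As a reference for the FOBOS update implemented above, here is a hypothetical NumPy sketch with scalar alpha, l1 and l2; it only mirrors the math and is not part of akg.

import numpy as np

def fobos_reference(var, alpha, l1, l2, delta):
    # Hypothetical NumPy sketch of the proximal gradient descent (FOBOS) update.
    prox_var = var - alpha * delta
    if l1 > 0:
        return np.sign(prox_var) / (1.0 + alpha * l2) * np.maximum(
            np.abs(prox_var) - alpha * l1, 0.0)
    return prox_var / (1.0 + alpha * l2)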
Example #3
def xlogy_grad_compute(placeholders, shape_max, dtype, rx, ry):
    """
    Do element-wise xlogy_grad compute.

    Args:
        placeholders (Union[list, tuple]): the placeholders of the input data
        shape_max (Union[list, tuple]): the broadcast shape
        dtype (string): the type of data input
        rx (list): the reduction indices of data input with broadcast
        ry (list): the reduction indices for data input with broadcast

    Returns:
        output_y1 (tvm.tensor.Tensor): result of xlogy_grad
        output_y2 (tvm.tensor.Tensor): result of xlogy_grad
    """
    x1_ori = placeholders[0]
    x2_ori = placeholders[1]
    grad_ori = placeholders[2]

    if dtype == "float16":
        x1 = akg.lang.cce.cast_to(x1_ori, "float32")
        x2 = akg.lang.cce.cast_to(x2_ori, "float32")
        grad = akg.lang.cce.cast_to(grad_ori, "float32")
        x1 = akg.lang.cce.broadcast(x1, shape_max)
        x2 = akg.lang.cce.broadcast(x2, shape_max)
        grad = akg.lang.cce.broadcast(grad, shape_max)
    else:
        x1 = akg.lang.cce.broadcast(x1_ori, shape_max)
        x2 = akg.lang.cce.broadcast(x2_ori, shape_max)
        grad = akg.lang.cce.broadcast(grad_ori, shape_max)

    esp_min = tvm.const(1.18e-38, dtype="float32")
    x1_addespmin = akg.lang.cce.vadds(x1, esp_min)

    if utils.product_is_mini():
        not_zero_x1 = akg.lang.cce.vmul(x1, reciprocal(x1_addespmin))
        log_x2 = tvm.compute(
            x2.shape,
            lambda *i: (tvm.log(x2(*i).astype("float16"))).astype("float32"),
            name="log_x2")
    else:
        not_zero_x1 = div(x1, x1_addespmin)
        log_x2 = akg.lang.cce.vlog(x2)

    partial_x1 = akg.lang.cce.vmul(not_zero_x1, log_x2)
    partial_x1g = akg.lang.cce.vmul(partial_x1, grad)

    partial_x2 = div(x1, x2) if not utils.product_is_mini() else \
        akg.lang.cce.vmul(x1, reciprocal(x2))
    partial_x2g = akg.lang.cce.vmul(partial_x2, grad)

    output_y1 = akg.lang.cce.sum(partial_x1g, rx, keepdims=True)
    output_y2 = akg.lang.cce.sum(partial_x2g, ry, keepdims=True)

    if dtype == "float16":
        output_y1 = akg.lang.cce.cast_to(output_y1, "float16")
        output_y2 = akg.lang.cce.cast_to(output_y2, "float16")
    return output_y1, output_y2
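A hypothetical NumPy sketch of the xlogy gradient computed above (the eps guard masks positions where x1 == 0); it is illustrative only, not part of akg.

import numpy as np

def xlogy_grad_reference(x1, x2, grad, rx, ry):
    # Hypothetical NumPy sketch of the formula above, not part of akg.
    eps = 1.18e-38
    not_zero_x1 = x1 / (x1 + eps)        # masks positions where x1 == 0
    partial_x1 = not_zero_x1 * np.log(x2)
    partial_x2 = x1 / x2
    dy1 = np.sum(partial_x1 * grad, axis=tuple(rx), keepdims=True)
    dy2 = np.sum(partial_x2 * grad, axis=tuple(ry), keepdims=True)
    return dy1, dy2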
Example #4
def apply_ftrl_impl(var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power, with_l2_shrinkage=False):
    """Ftrl-proximal Optimization algorithm"""

    dtype = var.dtype
    # cast to float32 for higher accuracy
    compute_dtype = dtype
    if dtype == "float16":
        compute_dtype = "float32"
        var, accum, linear, grad, lr, l1, l2, lr_power = [akg.topi.cast(t, compute_dtype) for t in
                                                          [var, accum, linear, grad, lr, l1, l2, lr_power]]
        if with_l2_shrinkage:
            l2_shrinkage = akg.topi.cast(l2_shrinkage, compute_dtype)

    shape = var.shape
    # grad_shrinkage = grad + 2 * l2_shrinkage * var
    if with_l2_shrinkage:
        l2_shrinkage = akg.topi.broadcast_to(l2_shrinkage, shape)
        grad_shrinkage = akg.tvm.compute(shape, lambda *indice:
                                         grad(*indice) + akg.tvm.const(2.0, compute_dtype) * l2_shrinkage(*indice) *
                                         var(*indice), name="grad_shrinkage")
    else:
        grad_shrinkage = grad

    # accum_new = accum + grad^2
    accum_new = akg.tvm.compute(shape, lambda *indice: accum(*indice) + grad(*indice)*grad(*indice), name="accum_new")

    # linear_new = linear + grad_shrinkage - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
    lr_power_neg = akg.topi.negative(lr_power)
    accum_new_pow = akg.topi.power(accum_new, lr_power_neg)
    accum_pow = akg.topi.power(accum, lr_power_neg)
    accum_pow_sub = akg.topi.subtract(accum_new_pow, accum_pow)
    accum_pow_sub_div_lr = div(accum_pow_sub, lr)
    linear_add_shrinkage = akg.topi.add(linear, grad_shrinkage)
    linear_new = akg.tvm.compute(shape, lambda *indice:
                                 linear_add_shrinkage(*indice) - accum_pow_sub_div_lr(*indice)*var(*indice),
                                 name="linear_new")

    # x = clip(linear_new, -l1, l1) - linear_new
    l1_neg = akg.topi.negative(l1)
    linear_new_clip = akg.topi.minimum(akg.topi.maximum(linear_new, l1_neg), l1)
    x_res = akg.topi.subtract(linear_new_clip, linear_new)
    # y = accum_new^(-lr_power) / lr + 2 * l2
    accum_new_pow_div_lr = div(accum_new_pow, lr)
    l2_2 = akg.topi.multiply(l2, 2)
    y_res = akg.topi.add(accum_new_pow_div_lr, l2_2)
    # var_new = x / y
    var_new = div(x_res, y_res)

    # cast to original type
    if dtype == "float16":
        var_new = akg.topi.cast(var_new, dtype)
        accum_new = akg.topi.cast(accum_new, dtype)
        linear_new = akg.topi.cast(linear_new, dtype)

    return var_new, accum_new, linear_new
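The FTRL-proximal step above reduces to the following formulas; the NumPy helper is an illustrative sketch, not part of akg.

import numpy as np

def ftrl_reference(var, accum, linear, grad, lr, l1, l2, lr_power,
                   l2_shrinkage=0.0, with_l2_shrinkage=False):
    # Hypothetical NumPy sketch of the FTRL-proximal update above.
    grad_shrinkage = grad + 2.0 * l2_shrinkage * var if with_l2_shrinkage else grad
    accum_new = accum + grad * grad
    accum_new_pow = accum_new ** (-lr_power)
    accum_pow = accum ** (-lr_power)
    linear_new = linear + grad_shrinkage - (accum_new_pow - accum_pow) / lr * var
    x = np.clip(linear_new, -l1, l1) - linear_new
    y = accum_new_pow / lr + 2.0 * l2
    return x / y, accum_new, linear_new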
Example #5
def _apply_adam_compute(var,
                        m,
                        v,
                        beta1_power,
                        beta2_power,
                        lr,
                        beta1,
                        beta2,
                        epsilon,
                        grad,
                        use_nesterov=False):
    """Compute for adam algorithm"""

    shape = var.shape

    # m_new <- m + (1-beta1)*(grad - m)
    m_new = akg.tvm.compute(shape,
                            lambda *indice: m(*indice) + (1 - beta1[0]) *
                            (grad(*indice) - m(*indice)),
                            name="m_new")

    # v_new <- v + (1-beta2)*(grad*grad-v)
    v_new = akg.tvm.compute(shape,
                            lambda *indice: v(*indice) + (1 - beta2[0]) *
                            (grad(*indice) * grad(*indice) - v(*indice)),
                            name="v_new")

    # lr_t <- lr*sqrt(1-beta2_power)/(1-beta1_power)
    one_const = akg.tvm.const(1, var.dtype)
    sqrt_value_beta2 = sqrt(akg.topi.subtract(one_const, beta2_power))
    lr_mul_sqrt_value = akg.topi.multiply(lr, sqrt_value_beta2)
    sub_value_beta1 = akg.topi.subtract(one_const, beta1_power)
    lr_t = div(lr_mul_sqrt_value, sub_value_beta1)

    # if use_nesterov: var_new <- var - lr_t*(m_new*beta1 + (1-beta1)*grad) / (epsilon + sqrt(v_new))
    # if not use_nesterov: var_new <- var - lr_t*m_new / (epsilon + sqrt(v_new))
    if use_nesterov:
        lr_t_mul_m_new = akg.tvm.compute(shape,
                                         lambda *indice: lr_t[0] *
                                         (m_new(*indice) * beta1[0] +
                                          (1 - beta1[0]) * grad(*indice)),
                                         name="lr_t_mul_m_new")
    else:
        lr_t_mul_m_new = akg.tvm.compute(
            shape,
            lambda *indice: lr_t[0] * m_new(*indice),
            name="lr_t_mul_m_new")
    sqrt_value_v_new = sqrt(v_new)
    epsilon_add_sqrt_value = akg.topi.add(epsilon, sqrt_value_v_new)
    div_value = div(lr_t_mul_m_new, epsilon_add_sqrt_value)
    var_new = akg.topi.subtract(var, div_value)

    return var_new, m_new, v_new
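For reference, a hypothetical NumPy sketch of the same Adam step (not part of akg):

import numpy as np

def adam_reference(var, m, v, beta1_power, beta2_power, lr, beta1, beta2,
                   epsilon, grad, use_nesterov=False):
    # Hypothetical NumPy sketch of the Adam update computed above.
    m_new = m + (1.0 - beta1) * (grad - m)
    v_new = v + (1.0 - beta2) * (grad * grad - v)
    lr_t = lr * np.sqrt(1.0 - beta2_power) / (1.0 - beta1_power)
    if use_nesterov:
        update = lr_t * (m_new * beta1 + (1.0 - beta1) * grad)
    else:
        update = lr_t * m_new
    var_new = var - update / (epsilon + np.sqrt(v_new))
    return var_new, m_new, v_new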
Example #6
def _tan_2x_multi(input_x, times):
    """calculating tan x by calculating tan (x/2^times) and using double angle formula multiple times"""
    # Calculate tan (x/2^times)
    if input_x.dtype == FLOAT_16 and utils.product_is_mini():
        input_x_divide = topi.multiply(input_x, tvm.const(1.0/(2.0**times), FLOAT_16))
        res = _tan_expand(input_x_divide)
    else:
        input_x_divide = topi.multiply(input_x, 1.0/(2.0**times))
        res = _tan_expand(input_x_divide)
    while times != 0:
        # using double angle formula: tan 2x = 2*tan x/(1-tan x*tan x)
        if input_x.dtype == FLOAT_16 and utils.product_is_mini():
            res_numerator = topi.multiply(res, tvm.const(2.0, FLOAT_16))
            tanx_square = topi.multiply(res, res)
            res_denominator = topi.add(topi.multiply(tanx_square, tvm.const(-1.0, FLOAT_16)), tvm.const(1.0, FLOAT_16))
        else:
            res_numerator = topi.multiply(res, 2.0)
            tanx_square = topi.multiply(res, res)
            res_denominator = topi.add(topi.multiply(tanx_square, -1.0), 1.0)

        if utils.product_is_mini():
            res = mul(res_numerator, reciprocal(res_denominator))
        else:
            res = div(res_numerator, res_denominator)
        times = times - 1
    return res
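A hypothetical NumPy sketch of the same argument-reduction scheme, with np.tan standing in for _tan_expand on the scaled input; it is illustrative only.

import numpy as np

def tan_2x_multi_reference(x, times):
    # Hypothetical NumPy sketch: np.tan stands in for _tan_expand.
    res = np.tan(x / (2.0 ** times))
    for _ in range(times):
        res = 2.0 * res / (1.0 - res * res)  # tan(2x) = 2*tan(x) / (1 - tan(x)^2)
    return res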
Example #7
def xdivy_compute(input_x, input_y):
    """xdivy compute"""
    _, _, shape_res = produce_shapes(get_shape(input_x), get_shape(input_y))
    vc_util.check_shape(shape_res)

    dtype = input_x.dtype

    broadcast_x = akg.lang.cce.broadcast(input_x, shape_res)
    broadcast_y = akg.lang.cce.broadcast(input_y, shape_res)
    broadcast_one = akg.lang.cce.broadcast(tvm.const(SCALAR_ONE, dtype),
                                           shape_res, dtype)

    abs_x = akg.lang.cce.vabs(broadcast_x)
    abs_y = akg.lang.cce.vabs(broadcast_y)
    add_x_y = akg.lang.cce.vadd(abs_x, abs_y)

    if dtype == "float32":
        data_min = akg.lang.cce.broadcast(
            tvm.const(MININUM_NUM_FLOAT, dtype=dtype), shape_res, dtype)
    elif dtype == "float16":
        data_min = akg.lang.cce.broadcast(
            tvm.const(MININUM_NUM_HALF, dtype=dtype), shape_res, dtype)

    zero_x_y = akg.lang.cce.vmin(add_x_y, data_min)

    if dtype == "float32":
        data_mul1 = akg.lang.cce.vmuls(
            zero_x_y, tvm.const(MAX_ONE_CONST_FLOAT, dtype=dtype))
        data_mul2 = akg.lang.cce.vmuls(
            data_mul1, tvm.const(MAX_ONE_CONST_FLOAT, dtype=dtype))
        mul_data = akg.lang.cce.vmuls(
            data_mul2, tvm.const(MAX_TWO_CONST_FLOAT, dtype=dtype))
    elif dtype == "float16":
        data_mul1 = akg.lang.cce.vmuls(zero_x_y,
                                       tvm.const(MAX_CONST_HALF, dtype=dtype))
        mul_data = akg.lang.cce.vmuls(data_mul1,
                                      tvm.const(MAX_CONST_HALF, dtype=dtype))

    sub_x_y_zero = akg.lang.cce.vsub(mul_data, broadcast_one)
    abs_x_y_zero = akg.lang.cce.vabs(sub_x_y_zero)
    input_y_revised = akg.lang.cce.vadd(broadcast_y, abs_x_y_zero)

    if dtype == "float16":
        broadcast_x = akg.lang.cce.cast_to(broadcast_x, "float32")
        input_y_revised = akg.lang.cce.cast_to(input_y_revised, "float32")

    res = div(broadcast_x, input_y_revised)

    if dtype == "float16":
        res = akg.lang.cce.cast_to(res, dtype)

    return res
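The net effect of the kernel above is plain division, with the denominator revised only where both inputs are zero so that 0/0 yields 0; a hypothetical NumPy sketch of that behavior, not part of akg:

import numpy as np

def xdivy_reference(x, y):
    # Hypothetical NumPy sketch: bump the denominator only where both inputs are zero.
    both_zero = (x == 0) & (y == 0)
    return x / np.where(both_zero, 1.0, y)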
Example #8
def lin_space_compute(input_assist, input_start, input_stop, input_num):
    """inv_grad compute implementation"""
    num_float = akg.lang.cce.cast_to(input_num, "float32")
    num_divided = akg.lang.cce.vadds(num_float, -1.0)

    step_divider = akg.lang.cce.vsub(input_stop, input_start)
    step = div(step_divider, num_divided)

    res_temp = akg.lang.cce.vmul(
        input_assist, akg.lang.cce.broadcast(step, input_assist.shape))
    res = akg.lang.cce.vadd(
        res_temp, akg.lang.cce.broadcast(input_start, input_assist.shape))

    return res
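Assuming input_assist holds the indices 0..num-1 (an assumption, since it is supplied externally), the computation reduces to the following hypothetical NumPy sketch:

import numpy as np

def lin_space_reference(assist, start, stop, num):
    # Hypothetical NumPy sketch; assist is assumed to hold the indices 0 .. num-1.
    step = (stop - start) / (num - 1.0)
    return assist * step + start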
Example #9
def softplus_grad_compute(input_gradients, input_features):
    """compute for calculations of softplus gradients"""
    shape_dy = get_shape(input_gradients)
    shape_x = get_shape(input_features)
    dtype = input_gradients.dtype

    if list(shape_dy) != list(shape_x):
        shape_dy, shape_x, shape_max = produce_shapes(shape_dy, shape_x)
        input_gradients = akg.lang.cce.broadcast(input_gradients, shape_max,
                                                 dtype)
        input_features = akg.lang.cce.broadcast(input_features, shape_max,
                                                dtype)
    else:
        shape_max = shape_dy

    if dtype != "float32":
        input_gradients = akg.lang.cce.cast_to(input_gradients, "float32")
        input_features = akg.lang.cce.cast_to(
            input_features,
            "float16" if utils.product_is_mini() else "float32")

    data_exp_tmp = akg.lang.cce.vexp(input_features)
    data_add_tmp = akg.lang.cce.vadds(data_exp_tmp, SCALAR_ONE)
    data_div_tmp = div(data_exp_tmp, data_add_tmp)
    res_tmp = akg.lang.cce.vmul(input_gradients, data_div_tmp)

    if dtype == "float16":
        res = akg.lang.cce.cast_to(res_tmp, "float16")
    elif dtype == "int32" or dtype == "int8" or dtype == "uint8":
        data_zero = akg.lang.cce.broadcast(tvm.const(0, "float16"), shape_max,
                                           "float16")
        res_min = akg.lang.cce.vmin(res_tmp, data_zero)
        res_max = akg.lang.cce.vmax(res_tmp, data_zero)
        res_max_int = akg.lang.cce.floor(res_max)
        res_min_int = akg.lang.cce.ceil(res_min)
        res = akg.lang.cce.vadd(res_max_int, res_min_int)
    else:
        res = res_tmp

    if dtype == "int8":
        res = akg.lang.cce.cast_to(res, "int8")
    elif dtype == "uint8":
        res = akg.lang.cce.cast_to(res, "uint8")

    return res
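On the floating-point path the result is grad * sigmoid(x); a hypothetical NumPy sketch, not part of akg:

import numpy as np

def softplus_grad_reference(dy, x):
    # Hypothetical NumPy sketch of the floating-point path: dy * sigmoid(x).
    exp_x = np.exp(x)
    return dy * exp_x / (1.0 + exp_x)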
Example #10
def _after_res_compute(abs_data):
    """
    Compute bessel_i1e for abs values of data greater than or equal to 3.75.

    Algorithm:
    t = 3.75 / x
    I1(x) = (1 / sqrt(x))*(0.39894228 - 0.03988024t - 0.00362018t^2
                           + 0.00163801t^3 - 0.01031555t^4 + 0.02282967t^5
                           - 0.02895312t^6 + 0.01787654t^7 - 0.00420059t^8)
    """
    broad_const_limit = akg.lang.cce.broadcast(
        akg.tvm.const(CONST_LIMIT, abs_data.dtype), abs_data.shape)
    data = div(broad_const_limit, abs_data)
    after_res = topi.multiply(data, ITR_AFTER[LEN_AFTER - 1])
    after_res = topi.add(after_res, ITR_AFTER[LEN_AFTER - 2])
    for iter_number in ITR_AFTER[LEN_AFTER - 3::-1]:
        after_res = mul(after_res, data)
        after_res = topi.add(after_res, iter_number)
    abs_data_rsqrt = rsqrt(abs_data)
    after_res = mul(after_res, abs_data_rsqrt)
    return after_res
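Assuming CONST_LIMIT is 3.75 and ITR_AFTER holds the docstring coefficients in that order (assumptions, since both are defined elsewhere), the tail computation can be sketched in NumPy as:

import numpy as np

def bessel_i1e_tail_reference(x):
    # Hypothetical NumPy sketch of the docstring polynomial, evaluated by Horner's rule.
    coeffs = [0.39894228, -0.03988024, -0.00362018, 0.00163801,
              -0.01031555, 0.02282967, -0.02895312, 0.01787654, -0.00420059]
    t = 3.75 / x
    poly = coeffs[-1]
    for c in reversed(coeffs[:-1]):
        poly = poly * t + c
    return poly / np.sqrt(x)   # multiply by 1/sqrt(x)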
Example #11
def fake_quant_with_min_max_vars_per_channel_compute(input_data,
                                                     input_min,
                                                     input_max,
                                                     num_bits=8,
                                                     narrow_range=False):
    """fake_quant_with_min_max_vars_per_channel compute implemention"""
    shape = get_shape(input_data.shape)
    dtype = input_data.dtype
    min_broadcast = akg.lang.cce.broadcast(input_min, shape, dtype)
    max_broadcast = akg.lang.cce.broadcast(input_max, shape, dtype)
    # get nudged_min and nudged_max by nudged_min_max_compute function
    nudged_min_nudged_max = nudged_min_max_compute(min_broadcast,
                                                   max_broadcast, num_bits,
                                                   narrow_range)
    # transform the input between nudged_max and nudged_min
    clamped_tmp = topi.minimum(input_data, nudged_min_nudged_max[1])
    clamped = topi.maximum(clamped_tmp, nudged_min_nudged_max[0])

    # calculate the quantized and dequantized results
    clamped_shifted = topi.subtract(clamped, nudged_min_nudged_max[0])
    if utils.product_is_mini():
        clamped_shifted_div_scale = mul(clamped_shifted,
                                        reciprocal(nudged_min_nudged_max[2]))
    else:
        clamped_shifted_div_scale = div(clamped_shifted,
                                        nudged_min_nudged_max[2])
    result_tmp = topi.add(clamped_shifted_div_scale, dc.half_const(dtype))
    floor_result_tmp = akg.lang.cce.floor(result_tmp)
    if utils.product_is_mini():
        floor_result_tmp = topi.cast(floor_result_tmp, "float16")

    floor_result_tmp = topi.cast(floor_result_tmp, "float32")
    scale_product = topi.multiply(floor_result_tmp, nudged_min_nudged_max[2])
    tmp_res = topi.add(scale_product, nudged_min_nudged_max[0])
    # get bool_both_zero_value by bool_both_zero_compute function
    bool_both_zero_value = bool_both_zero_compute(min_broadcast, max_broadcast)
    res = topi.multiply(tmp_res, bool_both_zero_value)

    return res
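Given the nudged_min, nudged_max and scale returned by nudged_min_max_compute, the remaining steps reduce to clamp, quantize and dequantize; a hypothetical NumPy sketch (the final masking assumes bool_both_zero_compute zeroes channels whose original min and max are both zero):

import numpy as np

def fake_quant_reference(x, input_min, input_max, nudged_min, nudged_max, scale):
    # Hypothetical NumPy sketch of the clamp / quantize / dequantize steps above.
    clamped = np.clip(x, nudged_min, nudged_max)
    quantized = np.floor((clamped - nudged_min) / scale + 0.5)
    res = quantized * scale + nudged_min
    # channels whose original (min, max) pair is (0, 0) are assumed to pass through as zero
    return np.where((input_min == 0) & (input_max == 0), 0.0, res)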
Example #12
def truncatemod_func(a, b):
    """function for truncatemod formula"""
    # For positive numbers, floor and trunc are equivalent
    return akg.topi.subtract(
        a, akg.topi.multiply(b, cast(floor(div(a, b)), b.dtype)))
Example #13
def _erf_compute(input_x):
    r"""
    Compute erf.

    .. math::
        \operatorname{erf}(x) = \operatorname{sign}(x) \left(
            1 - (a_1 t + a_2 t^2 + a_3 t^3 + a_4 t^4 + a_5 t^5) e^{-x^2}
            + \epsilon(|x|) \right), \\
        t = \dfrac{1}{1 + p|x|}, \qquad
        \left|\epsilon(|x|)\right| \le 1.5 \times 10^{-7}, \\
        \text{where}\; p = .3275911, \quad a_1 = .254829592, \quad a_2 = -.284496736, \\
        a_3 = 1.421413741, \quad a_4 = -1.453152027, \quad a_5 = 1.061405429

    Args:
        input_x (tvm.tensor.Tensor): Input tensor.

    Returns:
        tvm.tensor.Tensor, the erf approximation of input_x.
    """

    dtype = input_x.dtype
    shape = get_shape(input_x)

    cst_one = dc.one_const("float32")
    cst_neg_one = dc.neg_one_const("float32")
    cst_p = tvm.const(SCALER_P, "float32")
    cst_a1 = tvm.const(SCALER_A1, "float32")
    cst_a2 = tvm.const(SCALER_A2, "float32")
    cst_a3 = tvm.const(SCALER_A3, "float32")
    cst_a4 = tvm.const(SCALER_A4, "float32")
    cst_a5 = tvm.const(SCALER_A5, "float32")
    fp16_max = tvm.const(SCALER_FP16_MAX, "float32")
    fp16_min = tvm.const(SCALER_FP16_MIN, "float32")

    if dtype == "float16":
        input_x = topi.cast(input_x, "float32")

    # calculate: sign = round[(x*fp16max) / (|x*fp16max| + fp16min)]
    data_sign_vmuls = topi.multiply(input_x, fp16_max)
    data_sign_abs = topi.abs(data_sign_vmuls)
    data_adds = topi.add(data_sign_abs, fp16_min)
    data_sign_div = div(data_sign_vmuls, data_adds)
    data_round = round_value(data_sign_div)
    # mini device should cast to fp16 first
    if utils.product_is_mini():
        data_round = topi.cast(data_round, "float16")
    tensor_sign = topi.cast(data_round, "float32")

    # t = 1 / (1 + px)
    tensor_abs = topi.abs(input_x)
    one_plus_px = topi.add(cst_one, topi.multiply(tensor_abs, cst_p))
    data_t = div(topi.full(shape, "float32", 1.0), one_plus_px)

    # e^{-x^2}
    abs_square = topi.multiply(tensor_abs, tensor_abs)
    neg_square = topi.multiply(abs_square, cst_neg_one)
    exp_neg_square = exp(neg_square)

    # a1t + a2t^2 + a3t^3 + a4t^4 + a5t^5 = ((((a5t + a4)t + a3)t + a2)t + a1)t
    tmp_a5 = topi.multiply(cst_a5, data_t)
    tmp_a5a4 = topi.multiply(topi.add(tmp_a5, cst_a4), data_t)
    tmp_a5a4a3 = topi.multiply(topi.add(tmp_a5a4, cst_a3), data_t)
    tmp_a5a4a3a2 = topi.multiply(topi.add(tmp_a5a4a3, cst_a2), data_t)
    data_muladd = topi.multiply(topi.add(tmp_a5a4a3a2, cst_a1), data_t)

    # erf = sign(x) * (1 - data_muladd * e^{-x^2})
    erf_res = topi.multiply(
        tensor_sign,
        topi.add(
            cst_one,
            topi.multiply(cst_neg_one,
                          topi.multiply(data_muladd, exp_neg_square))))

    if dtype == "float16":
        erf_res = topi.cast(erf_res, dtype)

    return erf_res
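Assuming SCALER_P and SCALER_A1..A5 hold the constants listed in the docstring (they are defined elsewhere in the module), the approximation can be sketched in NumPy as:

import numpy as np

def erf_reference(x):
    # Hypothetical NumPy sketch of the Abramowitz-Stegun approximation used above.
    p, a1, a2, a3, a4, a5 = (0.3275911, 0.254829592, -0.284496736,
                             1.421413741, -1.453152027, 1.061405429)
    sign = np.sign(x)
    ax = np.abs(x)
    t = 1.0 / (1.0 + p * ax)
    poly = ((((a5 * t + a4) * t + a3) * t + a2) * t + a1) * t
    return sign * (1.0 - poly * np.exp(-ax * ax))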
Example #14
def nudged_min_max_compute(min_broadcast, max_broadcast, num_bits,
                           narrow_range):
    """
    Calculate the maximum and minimum values of the quantization.

    Notes:
        For each channel, scale[i] equals (max_broadcast[i] - min_broadcast[i]) / (quant_max - quant_min).
        Then compute nudged_zero_point:
                nudged_zero_point = floor(between_min_max_float + 0.5) + less_quant_min_float + more_quant_max_float,
        where zero_point_from_min is first calculated by:
                zero_point_from_min = (quant_min_float - min_broadcast) / scale,
        and between_min_max_float equals zero_point_from_min where quant_min_float <= zero_point_from_min <= quant_max_float.
        Besides, less_quant_min_float equals quant_min where zero_point_from_min < quant_min_float and 0 elsewhere;
        more_quant_max_float is defined analogously with quant_max.
        Finally, nudged_min and nudged_max are computed from scale and nudged_zero_point:
                 nudged_min = (quant_min - nudged_zero_point) * scale
                 nudged_max = (quant_max - nudged_zero_point) * scale

    Args:
        min_broadcast (tvm.tensor.Tensor): minimum value to be quantified for each channel.
        max_broadcast (tvm.tensor.Tensor): maximum value to be quantified for each channel.
        num_bits (int): num_bits is the bitwidth of the quantization, range [2,16].
        narrow_range (bool): if True, for each channel, quantize into the quantization range [1, 2^num_bits - 1];
                      otherwise quantize into the quantization range [0, 2^num_bits - 1].

    Returns:
        nudged_min (tvm.tensor.Tensor): The same type and shape as min_broadcast.
        nudged_max (tvm.tensor.Tensor): The same type and shape as max_broadcast.
        scale (tvm.tensor.Tensor): The same type and shape as max_broadcast.
    """

    dtype = min_broadcast.dtype
    quant_min = 1 if narrow_range else 0
    quant_max = (2**num_bits) - 1

    # quant_min and quant_max need to be broadcast because each channel is computed separately.
    quant_min_float = topi.full(min_broadcast.shape, dtype,
                                tvm.const(quant_min, dtype))
    quant_max_float = topi.full(min_broadcast.shape, dtype,
                                tvm.const(quant_max, dtype))

    # calculate the max-min difference for each channel.
    max_sub_min = topi.subtract(max_broadcast, min_broadcast)
    quant_max_sub_quant_min = topi.subtract(quant_max_float, quant_min_float)
    # compute scale = (max_broadcast - min_broadcast) / (quant_max - quant_min)
    # and min_div_scale = min_broadcast / scale
    if utils.product_is_mini():
        scale = mul(max_sub_min, reciprocal(quant_max_sub_quant_min))
        min_div_scale = mul(min_broadcast, reciprocal(scale))
    else:
        scale = div(max_sub_min, quant_max_sub_quant_min)
        min_div_scale = div(min_broadcast, scale)

    # zero_point_from_min = quant_min_float - min_broadcast / scale
    zero_point_from_min = topi.subtract(quant_min_float, min_div_scale)
    # if zero_point_from_min < quant_min_float, bool_less_quant_min_float = 1 else 0
    bool_less_quant_min_float = less_compare_float32(zero_point_from_min,
                                                     quant_min_float)
    # if quant_max_float < zero_point_from_min, bool_more_quant_max_float = 1 else 0
    bool_more_quant_max_float = less_compare_float32(quant_max_float,
                                                     zero_point_from_min)

    # according to above bool param to select effective value
    less_quant_min_float = topi.multiply(quant_min_float,
                                         bool_less_quant_min_float)
    more_quant_max_float = topi.multiply(quant_max_float,
                                         bool_more_quant_max_float)

    # compute which values are not less than quant_min_float and not larger than quant_max_float
    tensor_one = topi.full(min_broadcast.shape, dtype, dc.one_const(dtype))
    bool_not_less_quant_min_float = topi.subtract(tensor_one,
                                                  bool_less_quant_min_float)
    bool_not_more_quant_max_float = topi.subtract(tensor_one,
                                                  bool_more_quant_max_float)
    bool_between_min_max = topi.multiply(bool_not_less_quant_min_float,
                                         bool_not_more_quant_max_float)
    between_min_max_float = topi.multiply(zero_point_from_min,
                                          bool_between_min_max)
    # add 0.5 to values where min <= value <= max and then floor them.
    between_min_max_add_half_one = topi.add(between_min_max_float,
                                            dc.half_const(dtype))
    between_min_max_round = akg.lang.cce.floor(between_min_max_add_half_one)
    if utils.product_is_mini():
        between_min_max_round = topi.cast(between_min_max_round, "float16")

    between_min_max_round = topi.cast(between_min_max_round, "float32")

    # calculate the maximum and minimum values of the quantization
    nudged_zero_point_tmp = topi.add(less_quant_min_float,
                                     more_quant_max_float)
    nudged_zero_point = topi.add(nudged_zero_point_tmp, between_min_max_round)

    nudged_min_tmp = topi.subtract(quant_min_float, nudged_zero_point)
    nudged_max_tmp = topi.subtract(quant_max_float, nudged_zero_point)
    nudged_min = topi.multiply(nudged_min_tmp, scale)
    nudged_max = topi.multiply(nudged_max_tmp, scale)
    res = [nudged_min, nudged_max, scale]

    return res
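A hypothetical NumPy sketch of the same nudging logic, using np.where in place of the mask arithmetic; it is illustrative only, not part of akg:

import numpy as np

def nudged_min_max_reference(min_val, max_val, num_bits, narrow_range):
    # Hypothetical NumPy sketch of the per-channel nudging above.
    quant_min = 1.0 if narrow_range else 0.0
    quant_max = 2.0 ** num_bits - 1.0
    scale = (max_val - min_val) / (quant_max - quant_min)
    zero_point_from_min = quant_min - min_val / scale
    nudged_zero_point = np.where(
        zero_point_from_min < quant_min, quant_min,
        np.where(zero_point_from_min > quant_max, quant_max,
                 np.floor(zero_point_from_min + 0.5)))
    nudged_min = (quant_min - nudged_zero_point) * scale
    nudged_max = (quant_max - nudged_zero_point) * scale
    return nudged_min, nudged_max, scale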
Example #15
def Div(x, y):
    """div"""
    return div.div(x, y)