Example #1
def _atan_compute(data):
    """compute for atan"""
    dtype = data.dtype

    if dtype == "float16":
        data = topi.cast(data, "float32")

    abs_data = topi.abs(data)
    tensor_one = dc.one_const(abs_data.dtype)

    abs_data_sub_one = topi.subtract(abs_data, tensor_one)
    abs_data_add_one = topi.add(abs_data, tensor_one)
    abs_data2 = topi.abs(topi.divide(abs_data_sub_one, abs_data_add_one))

    # calculate result where |x| < 1
    res = _do_atan_taylor(abs_data)
    # calculate result where |x| > 1
    res_mt_one = topi.add(_do_atan_taylor(abs_data2),
                          tvm.const(CONST_PI_BY_FOUR, abs_data2.dtype))
    res = topi.minimum(res, res_mt_one)

    if utils.product_is_mini() and data.dtype == "float32":
        sign_mask = topi.cast(topi.sign(topi.cast(data, "float16")), "float32")
    else:
        sign_mask = topi.sign(data)

    res = topi.multiply(res, sign_mask)

    if dtype == "float16":
        res = topi.cast(res, "float16")

    return res
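
A quick NumPy check (illustrative only, not akg code) of the range-reduction identity the kernel above relies on for |x| > 1: for x > 0, atan(x) = pi/4 + atan((x - 1) / (x + 1)), so the Taylor helper is only ever applied to arguments below 1.

import numpy as np

x = np.linspace(0.5, 10.0, 20)
lhs = np.arctan(x)
rhs = np.pi / 4 + np.arctan((x - 1.0) / (x + 1.0))
assert np.allclose(lhs, rhs)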
Example #2
def bool_both_zero_compute(judged_min, judged_max):
    """If the input min and max are both zero, the output will be all zero, so a judge tensor is needed."""
    dtype = judged_min.dtype
    tensor_zero = topi.full(judged_min.shape, dtype, dc.zero_const(dtype))
    min_abs = topi.abs(judged_min)
    max_abs = topi.abs(judged_max)
    min_max_replace = topi.add(min_abs, max_abs)
    # check whether min and max are both zero; if so, the result is 0
    bool_min_max_product_less_zero = less_compare_float32(
        min_max_replace, tensor_zero)
    bool_min_max_product_more_zero = less_compare_float32(
        tensor_zero, min_max_replace)
    bool_both_zero = topi.add(bool_min_max_product_less_zero,
                              bool_min_max_product_more_zero)

    return bool_both_zero
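
Assuming less_compare_float32 returns 1.0 where the comparison holds and 0.0 otherwise, the function produces a mask that is 0 where min and max are both zero and 1 everywhere else. A small NumPy sketch of that behavior:

import numpy as np

judged_min = np.array([0.0, -1.0, 0.0], dtype=np.float32)
judged_max = np.array([0.0,  2.0, 3.0], dtype=np.float32)
s = np.abs(judged_min) + np.abs(judged_max)
mask = (s < 0).astype(np.float32) + (s > 0).astype(np.float32)
print(mask)  # [0. 1. 1.]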
Example #3
def _compute_mini(data_input, shape):
    """
    Use log and taylor to compute
    arctanh has the feature: arctanh(-abs(x)) = -arctanh(abs(x))
    """

    data_abs = topi.abs(data_input)
    result_ln = _compute_log(data_abs)
    result_taylor = _compute_taylor(data_abs)

    data_abs = topi.cast(data_abs, "float16")
    data_input = topi.cast(data_input, "float16")
    result_taylor = topi.cast(result_taylor, "float16")
    result_ln = topi.cast(result_ln, "float16")
    # when |x| < 0.5 use the Taylor expansion, and when 0.5 <= |x| < 1 use the log formula
    data_res = tvm.compute(shape,
                           lambda *i : akg.tvm.expr.Select(data_abs(*i) < dc.half_const("float16"),
                                                           result_taylor(*i),
                                                           result_ln(*i)),
                           name="le")

    # arctanh has the feature: arctanh(-abs(x)) = -arctanh(abs(x))
    data_res_neg = topi.multiply(data_res, dc.neg_one_const("float16"))
    data_res = tvm.compute(shape,
                           lambda *i : akg.tvm.expr.Select(data_input(*i) < dc.zero_const("float16"),
                                                           data_res_neg(*i),
                                                           data_res(*i)),
                           name="neg")
    return data_res
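
The helpers _compute_log and _compute_taylor are not shown here; a rough NumPy sketch of the same piecewise scheme (a low-order Taylor expansion for |x| < 0.5, the closed form arctanh(x) = 0.5 * log((1 + x) / (1 - x)) otherwise) is:

import numpy as np

x = np.array([0.1, 0.4, 0.7, 0.9])
log_form = 0.5 * np.log((1.0 + x) / (1.0 - x))
taylor = x + x**3 / 3 + x**5 / 5 + x**7 / 7   # accurate only for small |x|
res = np.where(x < 0.5, taylor, log_form)
assert np.allclose(res, np.arctanh(x), atol=1e-4)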
Example #4
def matrix_set_diag_compute(input_matrix, input_diagonal, input_help):
    """matrix_set_diag compute implemention"""
    shape_input = get_shape(input_matrix)
    input_dtype = input_matrix.dtype

    if input_dtype == "int8" or input_dtype == "uint8":
        input_matrix = topi.cast(input_matrix, "float16")
        input_diagonal = topi.cast(input_diagonal, "float16")
        input_help = topi.cast(input_help, "float16")
    if input_dtype == "int32" and product_is_mini():
        input_matrix = topi.cast(input_matrix, "float16")
        input_diagonal = topi.cast(input_diagonal, "float16")
        input_help = topi.cast(input_help, "float16")
        input_matrix = topi.cast(input_matrix, "float32")
        input_diagonal = topi.cast(input_diagonal, "float32")
        input_help = topi.cast(input_help, "float32")
    if input_dtype == "int32" and not product_is_mini():
        input_matrix = topi.cast(input_matrix, "float32")
        input_diagonal = topi.cast(input_diagonal, "float32")
        input_help = topi.cast(input_help, "float32")
    diag_tmp = topi.broadcast_to(input_diagonal, shape_input)
    help_tmp = topi.add(input_help, -1)
    help_y = topi.abs(help_tmp)

    res_vmul_x = topi.multiply(input_matrix, help_y)
    res_vmul_y = topi.multiply(diag_tmp, input_help)
    res = topi.add(res_vmul_x, res_vmul_y)

    if input_dtype == "int32" and product_is_mini():
        res = topi.cast(res, "float16")

    res = topi.cast(res, input_dtype)

    return res
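
A NumPy illustration (not akg code) of the masking trick above, assuming input_help is 1 on the diagonal and 0 elsewhere: the result keeps the input off the diagonal and takes the broadcast diagonal values on it.

import numpy as np

matrix = np.arange(9, dtype=np.float32).reshape(3, 3)
diagonal = np.array([10.0, 20.0, 30.0], dtype=np.float32)
help_mask = np.eye(3, dtype=np.float32)   # 1 on the diagonal, 0 elsewhere

res = matrix * np.abs(help_mask - 1.0) + diagonal * help_mask
assert np.allclose(np.diag(res), diagonal)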
Example #5
File: asinh.py  Project: mindspore-ai/akg
def asinh(x, target=utils.CCE):
    r"""
    Compute asinh function.

    .. math:: asinh(x) = log(x+\sqrt{x*x+1})

    Args:
        x (tvm.tensor.Tensor): Tensor of type float16, float32. 

    Returns:
       tvm.tensor.Tensor, has the same type and shape as x.
    
    Supported Platforms:
        'Ascend'
    """
    # check shape
    utils.check_shape(x)

    # check input tensor data_type
    utils.ops_dtype_check(x.dtype, utils.DtypeForDavinci.ALL_FLOAT)
    dtype = x.dtype

    # It is known that asinh(x) = log(x + sqrt(x*x+1)) and asinh(-x) = -asinh(x).
    # If x is a large negative number, (x + sqrt(x*x+1)) is close to zero and loses precision.
    # So compute asinh(x) = sign(x) * log(|x| + sqrt(|x|*|x| + 1)) instead.
    compute_dtype = dtype
    if dtype == "float16":
        # To avoid overflow and improve accuracy, x is cast to float32
        compute_dtype = "float32"
        x = topi.cast(x, compute_dtype)

    x_abs = topi.abs(x)

    if product_is_mini():
        # sqrt(|x|*|x| + 1) = |x| * sqrt(1 + 1/(|x|*|x|))
        vsquare_add_one = topi.add(1,
                                   topi.divide(1, topi.multiply(x_abs, x_abs)))
        sqrt_compute_value = sqrt_mini_newton_iter_impl(vsquare_add_one)
        sqrt_value = topi.multiply(x_abs, sqrt_compute_value)
    else:
        x_abs_square_add_one = topi.add(topi.multiply(x_abs, x_abs), 1)
        sqrt_value = topi.sqrt(x_abs_square_add_one)

    x_add_sqrt = topi.add(x_abs, sqrt_value)

    if product_is_mini():
        log_value = log_compute_mini_impl(x_add_sqrt, target)
    else:
        log_value = topi.log(x_add_sqrt)

    res = topi.multiply(Sign(x, target), log_value)

    if res.dtype != dtype:
        res = topi.cast(res, dtype)

    if product_is_mini():
        attrs = {"enable_auto_inline": False}
        return res, attrs
    return res
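
A quick NumPy check (illustrative only) of the sign-based reformulation used above, which avoids the cancellation in log(x + sqrt(x*x + 1)) for large negative x:

import numpy as np

x = np.array([-30.0, -1.5, 0.0, 2.0, 100.0], dtype=np.float32)
ref = np.sign(x) * np.log(np.abs(x) + np.sqrt(x * x + 1.0))
assert np.allclose(ref, np.arcsinh(x), rtol=1e-5, atol=1e-6)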
Example #6
def _apply_ada_max_compute(var, m, v, grad, lr, beta1, beta1_power, beta2,
                           epsilon):
    """Compute ada_max."""
    # cast to float32 for improved accuracy
    inp_dtype = var.dtype
    if inp_dtype == 'float16':
        var = topi.cast(var, 'float32')
        m = topi.cast(m, 'float32')
        v = topi.cast(v, 'float32')
        lr = topi.cast(lr, 'float32')
        beta1_power = topi.cast(beta1_power, 'float32')
        beta1 = topi.cast(beta1, 'float32')
        beta2 = topi.cast(beta2, 'float32')
        grad = topi.cast(grad, 'float32')
    epsilon = tvm.const(epsilon, 'float32')

    # m += (grad - m) * (1 - beta1)
    rhs = tvm.compute(beta1.shape,
                      lambda *i: beta1(*i) * neg_one_const("float32"))
    rhs = tvm.compute(rhs.shape, lambda *i: rhs(*i) + one_const("float32"))
    lhs = topi.subtract(grad, m)
    rhs = tvm.compute(lhs.shape, lambda *i: lhs(*i) * rhs[0])
    m = topi.add(m, rhs)

    # v = max(beta2*v, abs(grad))
    lhs = tvm.compute(v.shape, lambda *i: v(*i) * beta2[0])
    rhs = topi.abs(grad)
    v = topi.maximum(lhs, rhs)

    # var -= lr / (1 - beta1_power) * (m / (v + epsilon))
    #      = lr * m / ((1 - beta1_power) * (v + epsilon))
    # v + epsilon
    rhs = tvm.compute(v.shape, lambda *i: v(*i) + epsilon)
    # 1 - beta1_power
    lhs = tvm.compute(beta1_power.shape,
                      lambda *i: beta1_power(*i) * neg_one_const("float32"))
    lhs = tvm.compute(lhs.shape, lambda *i: lhs(*i) + one_const("float32"))
    # (1 - beta1_power) * (v + epsilon)
    rhs = tvm.compute(rhs.shape, lambda *i: rhs(*i) * lhs[0])
    # lr * m
    lhs = tvm.compute(m.shape, lambda *i: m(*i) * lr[0])
    # lr * m / ((1 - beta1_power) * (v + epsilon))
    rhs = reciprocal(rhs)
    rhs = topi.multiply(lhs, rhs)
    var = topi.subtract(var, rhs)

    if inp_dtype == 'float16':
        var = topi.cast(var, inp_dtype)
        m = topi.cast(m, inp_dtype)
        v = topi.cast(v, inp_dtype)

    return var, m, v
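
The same element-wise update written as a plain NumPy reference, a sketch that assumes scalar hyper-parameters (not part of akg):

import numpy as np

def ada_max_ref(var, m, v, grad, lr, beta1, beta1_power, beta2, epsilon):
    m = m + (grad - m) * (1.0 - beta1)
    v = np.maximum(beta2 * v, np.abs(grad))
    var = var - lr * m / ((1.0 - beta1_power) * (v + epsilon))
    return var, m, v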
Example #7
def _reduce_any_d_compute(x, axis=None, keepdims=None):
    """reduce_any_d compute implemention"""
    dtype = x.dtype
    data_fp16 = topi.cast(x, "float16")
    data_abs = topi.abs(data_fp16)

    res_tmp = akg.lang.ascend.reduce_max(data_abs, axis=axis, keepdims=keepdims)
    shape_len = len(x.shape)
    if axis[-1] == shape_len - 1 and not keepdims:
        res_shape = [dim.value for dim in res_tmp.shape]
        res_shape.pop()
        res_tmp = tvm.compute(res_shape, lambda *indice: res_tmp(*indice, 0), name="reduce_res")
    res_s8 = topi.cast(res_tmp, dtype)
    return res_s8
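
The cast/abs/reduce_max trick above amounts to a logical "any" over the reduced axis; a NumPy sketch for 0/1 int8 input:

import numpy as np

x = np.array([[0, 0, 1], [0, 0, 0]], dtype=np.int8)
res = np.abs(x.astype(np.float16)).max(axis=-1).astype(np.int8)
assert np.array_equal(res.astype(bool), np.any(x, axis=-1))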
Example #8
def _do_atan_taylor(data):
    """
    Taylor algorithm for atan.

        if 0 < x < tan(pi/8):
            atan(x) = x - x^3/3 + x^5/5 - x^7/7 ...
        elif tan(pi/8) < x < tan(pi/4):
            atan(x) = atan(y) + atan((x-y)/(1+x*y)), where y = tan(pi/8)

    Args:
        data (tvm.tensor.Tensor): Input data.

    Returns:
        A tvm.tensor.Tensor of atan(x).
    """
    dtype = data.dtype

    tensor_offset = tvm.const(TAN_PI_BY_EIGHT, dtype)
    denom = topi.multiply(data, tvm.const(TAN_PI_BY_EIGHT, dtype))
    denom = topi.add(denom, dc.one_const(dtype))
    numerator = topi.subtract(data, tensor_offset)
    ddata = topi.divide(numerator, denom)
    ddata = topi.abs(ddata)

    square_ddata = topi.multiply(ddata, ddata)
    res = tvm.const(ATAN_TAYLOR_COEF[CONST_ITERTOR], dtype)
    for i in reversed(range(CONST_ITERTOR)):
        res = topi.multiply(res, square_ddata)
        res = topi.add(res, tvm.const(ATAN_TAYLOR_COEF[i], dtype))
    res = topi.multiply(res, ddata)
    res = topi.add(res, tvm.const(CONST_PI_BY_EIGHT, dtype))

    square_data = topi.multiply(data, data)
    res2 = tvm.const(ATAN_TAYLOR_COEF[CONST_ITERTOR2], dtype)
    for i in reversed(range(CONST_ITERTOR2)):
        res2 = topi.multiply(res2, square_data)
        res2 = topi.add(res2, tvm.const(ATAN_TAYLOR_COEF[i], dtype))
    return topi.minimum(res, topi.multiply(res2, data))
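
A NumPy check (illustrative only) of the second docstring identity with y = tan(pi/8): for x between tan(pi/8) and 1, atan(x) = pi/8 + atan((x - y) / (1 + x*y)).

import numpy as np

y = np.tan(np.pi / 8)
x = np.linspace(y, 1.0, 10)
assert np.allclose(np.arctan(x), np.pi / 8 + np.arctan((x - y) / (1.0 + x * y)))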
Example #9
def _apply_adagrad_da_compute(var, gradient_accum, gradient_squared_accum,
                              grad, lr, l1, l2, global_step):
    """Compute adagrad_da."""
    dtype = var.dtype
    # cast to float32 for higher precision
    if dtype == "float16":
        gradient_accum = topi.cast(gradient_accum, "float32")
        gradient_squared_accum = topi.cast(gradient_squared_accum, "float32")
        grad = topi.cast(grad, "float32")
        lr = topi.cast(lr, "float32")
        l1 = topi.cast(l1, "float32")
        l2 = topi.cast(l2, "float32")
    if product_is_mini():
        global_step = topi.cast(global_step, "float16")
        global_step = topi.cast(global_step, "float32")
    else:
        global_step = topi.cast(global_step, "float32")

    # 1.grad_accum += grad
    gradient_accum = topi.add(gradient_accum, grad)

    # 2.grad_squared_accum += grad * grad
    gs = topi.multiply(grad, grad)
    gradient_squared_accum = topi.add(gradient_squared_accum, gs)

    # 3.if l1 > 0: tmp_val = Sign(grad_accum) * max(|grad_accum|-l1*global_step, 0)
    #   else:      tmp_val = grad_accum
    sign_val = Sign(gradient_accum)
    abs_val = topi.abs(gradient_accum)
    mul_val = topi.multiply(global_step, l1)
    sub_val = topi.subtract(abs_val, mul_val)
    max_val = topi.maximum(sub_val, tvm.const(0, sub_val.dtype))
    tmp_val = topi.multiply(sign_val, max_val)

    def select(l1, tmp_val, gradient_accum):
        """Returns tmp_val if l1 > 0 else gradient_accum."""
        if product_is_mini():
            l1 = topi.cast(l1, "float16")
            tmp_val = topi.cast(tmp_val, "float16")
            gradient_accum = topi.cast(gradient_accum, "float16")
        tmp_val = akg.tvm.compute(
            tmp_val.shape, lambda *i: tvm.expr.Select(l1[0] > 0, tmp_val(*i),
                                                      gradient_accum(*i)))
        return topi.cast(tmp_val, "float32") if product_is_mini() else tmp_val

    tmp_val = select(l1, tmp_val, gradient_accum)

    # 4.x_value = -1 * lr * tmp_val
    x_value = topi.multiply(lr, tvm.const(-1, "float32"))
    x_value = topi.multiply(x_value, tmp_val)

    # 5.y_value = l2 * global_step * lr + sqrt(grad_squared_accum)
    pro_val = topi.multiply(l2, global_step)
    pro_val = topi.multiply(pro_val, lr)
    sqrt_val = sqrt(gradient_squared_accum, target=utils.CCE)
    y_value = topi.add(pro_val, sqrt_val)

    # 6.var = x_value / y_value
    if product_is_mini():
        y_rec = reciprocal(y_value, target=utils.CCE)
        var_out = topi.multiply(x_value, y_rec)
    else:
        var_out = topi.divide(x_value, y_value)

    if dtype == "float16":
        var_out = akg.lang.ascend.cast_to(var_out, "float16")
        gradient_accum = akg.lang.ascend.cast_to(gradient_accum, "float16")
        gradient_squared_accum = akg.lang.ascend.cast_to(
            gradient_squared_accum, "float16")

    return var_out, gradient_accum, gradient_squared_accum
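
For reference, the six numbered steps as a NumPy sketch with scalar lr, l1, l2 and global_step (assumptions for illustration, not akg code); note that var is fully replaced in step 6, not decremented:

import numpy as np

def adagrad_da_ref(var, grad_accum, grad_sq_accum, grad, lr, l1, l2, global_step):
    grad_accum = grad_accum + grad
    grad_sq_accum = grad_sq_accum + grad * grad
    if l1 > 0:
        tmp = np.sign(grad_accum) * np.maximum(np.abs(grad_accum) - l1 * global_step, 0.0)
    else:
        tmp = grad_accum
    x_value = -lr * tmp
    y_value = l2 * global_step * lr + np.sqrt(grad_sq_accum)
    var = x_value / y_value
    return var, grad_accum, grad_sq_accum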
Example #10
def _erf_compute(input_x):
    r"""
    Compute erf.

    .. math::
        \operatorname{erf}(x) = \operatorname{sign}(x) \left(
            1 - (a_1 t + a_2 t^2 + a_3 t^3 + a_4 t^4 + a_5 t^5) e^{-x^2} + \epsilon(|x|)
            \right), \\
        t = \dfrac{1}{1 + p|x|}, \qquad
        \left|\epsilon(|x|)\right| \le 1.5 \times 10^{-7}, \\
        \text{where}\; p = 0.3275911, \quad a_1 = 0.254829592, \quad a_2 = -0.284496736, \\
        a_3 = 1.421413741, \quad a_4 = -1.453152027, \quad a_5 = 1.061405429

    Args:
        input_x (tvm.tensor.Tensor): Input tensor.

    Returns:
        tvm.tensor.Tensor as rational approximation.
    """

    dtype = input_x.dtype
    shape = get_shape(input_x)

    cst_one = dc.one_const("float32")
    cst_neg_one = dc.neg_one_const("float32")
    cst_p = tvm.const(SCALER_P, "float32")
    cst_a1 = tvm.const(SCALER_A1, "float32")
    cst_a2 = tvm.const(SCALER_A2, "float32")
    cst_a3 = tvm.const(SCALER_A3, "float32")
    cst_a4 = tvm.const(SCALER_A4, "float32")
    cst_a5 = tvm.const(SCALER_A5, "float32")
    fp16_max = tvm.const(SCALER_FP16_MAX, "float32")
    fp16_min = tvm.const(SCALER_FP16_MIN, "float32")

    if dtype == "float16":
        input_x = topi.cast(input_x, "float32")

    # calculate: sign = round[(x*fp16max) / (|x*fp16max| + fp16min)]
    data_sign_vmuls = topi.multiply(input_x, fp16_max)
    data_sign_abs = topi.abs(data_sign_vmuls)
    data_adds = topi.add(data_sign_abs, fp16_min)
    data_sign_div = div(data_sign_vmuls, data_adds)
    data_round = round_value(data_sign_div)
    # mini device should cast to fp16 first
    if utils.product_is_mini():
        data_round = topi.cast(data_round, "float16")
    tensor_sign = topi.cast(data_round, "float32")

    # t = 1 / (1 + p*|x|)
    tensor_abs = topi.abs(input_x)
    one_plus_px = topi.add(cst_one, topi.multiply(tensor_abs, cst_p))
    data_t = div(topi.full(shape, "float32", 1.0), one_plus_px)

    # e^{-x^2}
    abs_square = topi.multiply(tensor_abs, tensor_abs)
    neg_square = topi.multiply(abs_square, cst_neg_one)
    exp_neg_square = exp(neg_square)

    # a1t + a2t^2 + a3t^3 + a4t^4 + a5t^5 = ((((a5t + a4)t + a3)t + a2)t + a1)t
    tmp_a5 = topi.multiply(cst_a5, data_t)
    tmp_a5a4 = topi.multiply(topi.add(tmp_a5, cst_a4), data_t)
    tmp_a5a4a3 = topi.multiply(topi.add(tmp_a5a4, cst_a3), data_t)
    tmp_a5a4a3a2 = topi.multiply(topi.add(tmp_a5a4a3, cst_a2), data_t)
    data_muladd = topi.multiply(topi.add(tmp_a5a4a3a2, cst_a1), data_t)

    # erf = sign(x) * (1 - data_muladd * e^{-x^2})
    erf_res = topi.multiply(
        tensor_sign,
        topi.add(
            cst_one,
            topi.multiply(cst_neg_one,
                          topi.multiply(data_muladd, exp_neg_square))))

    if dtype == "float16":
        erf_res = topi.cast(erf_res, dtype)

    return erf_res
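
A scalar Python sketch (illustrative only) of the same Abramowitz-Stegun style rational approximation, using the constants listed in the docstring and checked against math.erf:

import math

P, A1, A2, A3, A4, A5 = (0.3275911, 0.254829592, -0.284496736,
                         1.421413741, -1.453152027, 1.061405429)

def erf_approx(x):
    t = 1.0 / (1.0 + P * abs(x))
    poly = ((((A5 * t + A4) * t + A3) * t + A2) * t + A1) * t
    return math.copysign(1.0 - poly * math.exp(-x * x), x)

for v in (-2.0, -0.5, 0.0, 0.3, 1.0, 3.0):
    assert abs(erf_approx(v) - math.erf(v)) < 2e-7  # documented bound is 1.5e-7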
Example #11
def _asum(data, axis, cof):
    """Compute the sum of abs(data) scaled by cof along the given axis."""
    data_tmp_input = topi.abs(data)
    tmp = topi.multiply(data_tmp_input, cof)
    res = topi.sum(tmp, axis)
    return res
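
In NumPy terms this is just a scaled sum of absolute values (illustration only):

import numpy as np

data = np.array([[1.0, -2.0], [3.0, -4.0]], dtype=np.float32)
print(np.sum(np.abs(data) * 0.5, axis=1))  # [1.5 3.5]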