def gaussian(x, sig=1.0, mean=0.0):
    r"""
    Implementation of gaussian filter.

    :math:`G(x,var) = 1/(2*pi*val^2) * exp(-\sum_j(x_{ij}^2)/(2*var^2))`
    """
    if len(x.shape) == 1:
        two = akg.tvm.const(2, x.dtype)
        sig_cast = akg.tvm.const(sig, x.dtype)
        two_pi = akg.tvm.const(6.283, x.dtype)  # approximation of 2*pi
        return 1 / (sig_cast * sig_cast * two_pi) * exp.exp(
            -(x) * (x) / (two * sig_cast * sig_cast))
    elif len(x.shape) == 2:
        sig_cast = akg.tvm.const(sig, x.dtype)
        two_pi = akg.tvm.const(6.283, x.dtype)  # approximation of 2*pi
        x_square = akg.tvm.compute(x.shape, lambda *i: x(*i) * x(*i))
        # exponent: -sum_j(x_ij^2) / (2 * sig^2), reduced over axis 1
        sum_reduce = akg.topi.sum(akg.tvm.compute(
            x.shape,
            lambda *i: x_square(*i) * akg.tvm.const(-0.5 / (sig * sig), x.dtype)),
                                  axis=1,
                                  keepdims=True)
        return 1 / (sig_cast * sig_cast * two_pi) * exp.exp(sum_reduce)
    else:
        raise RuntimeError(
            "Do not support {0}-dim input for gaussian.".format(len(x.shape)))
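
# A minimal NumPy reference for the 1-D branch of `gaussian` above: an
# illustrative sketch, not part of the original kernel (the helper name is
# hypothetical; the exact 2*pi replaces the kernel's 6.283 approximation).
def _gaussian_1d_np_reference(x, sig=1.0):
    import numpy as np
    return 1.0 / (2.0 * np.pi * sig * sig) * np.exp(-x * x / (2.0 * sig * sig))
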
def softmax_cross_entropy_with_logits(labels,
                                      logits,
                                      axis,
                                      reduction="mean",
                                      scale=1.0):
    """Compute softmax cross-entropy loss and its gradient w.r.t. logits."""
    # subtract the per-axis max for numerical stability
    max_logits = reduce_max(logits, axis, keepdims=True)
    data_sub = sub(logits, max_logits)
    akg.register_variables("minus_max", [logits], data_sub)
    # log-softmax: (x - max) - log(sum(exp(x - max)))
    data_exp = exp(data_sub)
    data_expsum, _ = sum_value(data_exp, axis, keepdims=True)
    data_expsum_log = log(data_expsum)
    sub_value = sub(data_sub, data_expsum_log)
    # cross entropy: -labels * log_softmax(logits)
    neg_labels = neg(labels)
    cross_entropy = mul(neg_labels, sub_value)
    # backprop: prob - labels, where prob = softmax(logits)
    prob = exp(sub_value)
    backprop = sub(prob, labels)

    if reduction.lower() == "none":
        loss, _ = sum_v2(cross_entropy, axis, keepdims=True)
    elif reduction.lower() == "mean":
        loss, _ = sum_v2(cross_entropy, axis=None)
        factor = logits.shape[0].value
        loss = loss * akg.tvm.const(1 / factor, logits.dtype)
        backprop = backprop * akg.tvm.const(1 / factor, logits.dtype)
    elif reduction.lower() == "sum":
        loss, _ = sum_v2(cross_entropy, axis=None)
    else:
        raise ValueError(
            "reduction method {0} is not supported".format(reduction))
    backprop = akg.topi.multiply(backprop,
                                 akg.tvm.const(scale, backprop.dtype))
    return loss, backprop
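
# A hedged NumPy sketch of the loss/gradient computed above, assuming 2-D
# logits reduced over axis=1 (the helper name is hypothetical; this is an
# illustrative cross-check, not the kernel itself).
def _softmax_xent_np_reference(labels, logits, reduction="mean", scale=1.0):
    import numpy as np
    shifted = logits - logits.max(axis=1, keepdims=True)
    log_prob = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    cross_entropy = -labels * log_prob
    backprop = np.exp(log_prob) - labels
    if reduction == "none":
        loss = cross_entropy.sum(axis=1, keepdims=True)
    elif reduction == "mean":
        loss = cross_entropy.sum() / logits.shape[0]
        backprop = backprop / logits.shape[0]
    else:  # "sum"
        loss = cross_entropy.sum()
    return loss, backprop * scale
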
def sigmoid_cross_entropy_with_logits_grad_compute(predict, target, dout):
    """sigmoid_cross_entropy_with_logits_grad compute implemention"""
    dtype = predict.dtype
    if dtype == "float16":
        predict = topi.cast(predict, "float32")
        target = topi.cast(target, "float32")
        dout = topi.cast(dout, "float32")

    # e^x
    val1 = exp(predict)
    # 1 + e^x
    val2 = topi.add(val1, tvm.const(SCALAR_ONE, dtype="float32"))
    # e^x / (1 + e^x)
    val3 = topi.divide(val1, val2)
    # -target
    val4 = topi.multiply(target, tvm.const(SCALAR_NEGTIVE_ONE,
                                           dtype="float32"))
    # e^x / (1 + e^x) - y
    val5 = topi.add(val3, val4)

    result = topi.multiply(val5, dout)

    if dtype == "float16":
        result = topi.cast(result, dtype)
    return result
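
# The chain val1..val5 above reduces to (sigmoid(x) - y) * dout; a NumPy
# sketch for cross-checking (illustrative, hypothetical helper name):
def _sigmoid_xent_grad_np_reference(predict, target, dout):
    import numpy as np
    sigmoid = np.exp(predict) / (1.0 + np.exp(predict))
    return (sigmoid - target) * dout
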
def selu_compute(input_data):
    """selu compute implemention"""
    # if input_dtype is float16,convert it to float32
    dtype = input_data.dtype
    if dtype == "float16" or dtype == "float32":
        input_data = topi.cast(input_data, "float32")
        type_tmp = "float32"
    else:
        input_data = topi.cast(input_data, "float16")
        type_tmp = "float16"

    # generate a zero tensor of the same shape to compare against
    tensor_zero = topi.multiply(input_data, tvm.const(0, dtype=type_tmp))
    # split the input into its negative part min(x, 0) and
    # positive part max(x, 0)
    negative_res = topi.minimum(input_data, tensor_zero)
    positive_res = topi.maximum(input_data, tensor_zero)
    exp_res = exp(negative_res)
    sub_res = topi.add(exp_res, tvm.const(SCALAR_NEGATIVE_ONE, dtype=type_tmp))
    negative_muls_res = topi.multiply(sub_res, tvm.const(SCALE_ALPHA_PRODUCT, dtype=type_tmp))
    if dtype == "int8":
        negative_muls_res = akg.lang.cce.ceil(negative_muls_res)

    positive_muls_res = topi.multiply(positive_res, tvm.const(SCALE, dtype=type_tmp))
    res = topi.add(negative_muls_res, positive_muls_res)
    # cast back to the original dtype
    if dtype in ("float16", "int8", "int32"):
        res = topi.cast(res, dtype)

    return res
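
# A NumPy sketch of SELU for cross-checking. SCALE and SCALE_ALPHA_PRODUCT are
# module constants not shown here; the values below are the commonly published
# SELU constants and are an assumption (illustrative, hypothetical helper name).
def _selu_np_reference(x):
    import numpy as np
    scale = 1.0507009873554805   # assumed value of SCALE
    alpha = 1.6732632423543772   # SCALE_ALPHA_PRODUCT is assumed to be scale * alpha
    return np.where(x > 0, scale * x, scale * alpha * (np.exp(x) - 1.0))
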
def sinh_compute(x):
    """Compute sinh."""
    dtype = x.dtype
    # cast float16 to float32 to get a more precise result
    if dtype == "float16":
        x = topi.cast(x, "float32")

    data_exp = exp(x)
    negative_data = topi.multiply(x, -1)
    negative_data_exp = exp(negative_data)
    data_exp_sub = topi.subtract(data_exp, negative_data_exp)

    res = topi.multiply(data_exp_sub, tvm.const(0.5, "float32"))
    if dtype == "float16":
        res = topi.cast(res, "float16")

    return res
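
# A one-line NumPy cross-check of the identity used above,
# sinh(x) = (e^x - e^-x) / 2 (illustrative, hypothetical helper name):
def _sinh_np_reference(x):
    import numpy as np
    return 0.5 * (np.exp(x) - np.exp(-x))  # equals np.sinh(x)
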
def _compute_update(logbase, sign_decay, sign_gm, grad):
    """Calculate var decay."""
    vmul_tmp = tvm.compute(sign_gm.shape,
                           lambda *indice: sign_gm(*indice) * sign_decay[0])
    vmul_tmp = tvm.compute(vmul_tmp.shape,
                           lambda *indice: vmul_tmp(*indice) * logbase[0])
    exp_tmp = exp(vmul_tmp)
    update = topi.multiply(exp_tmp, grad)
    return update
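
# A NumPy sketch of the update above, with logbase and sign_decay taken as the
# scalars the kernel reads via element [0] (illustrative, hypothetical name):
def _compute_update_np_reference(logbase, sign_decay, sign_gm, grad):
    import numpy as np
    return np.exp(logbase * sign_decay * sign_gm) * grad
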
def sigmoid_cross_entropy_with_logits(labels=None, logits=None):
    ##
    # \brief Computes sigmoid cross entropy given `logits`.
    #
    # \f[
    #   cost = labels * -log(sigmoid(logits)) + (1 - labels) * -log(1 - sigmoid(logits))
    # \f]
    # \param labels akg.tvm.Tensor of the same type and shape as `logits`.
    # \param logits akg.tvm.Tensor of type float16, float32.
    #
    # \return akg.tvm.Tensor of the same shape as `logits` with the componentwise logistic losses.
    ##

    if get_shape(logits) != get_shape(labels):
        raise ValueError(
            "logits and labels must have the same shape (%s vs %s)" %
            (get_shape(logits), get_shape(labels)))
    if logits.dtype != labels.dtype:
        raise ValueError(
            "logits and labels must have the same dtype (%s vs %s)" %
            (logits.dtype, labels.dtype))

    shape = logits.shape
    dtype = logits.dtype

    check_list = ["float16", "float32"]
    if dtype.lower() not in check_list:
        raise RuntimeError(
            "sigmoid_cross_entropy_with_logits only supports %s, got %s"
            % (",".join(check_list), dtype))

    #    z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
    # =  z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
    # =  max(x, 0) - x * z + log(1 + exp(-abs(x)))

    zero = akg.tvm.const(0, dtype=dtype)
    relu_logits = akg.tvm.compute(
        shape,
        lambda *indice: akg.tvm.expr.Select(
            logits(*indice) < zero, zero, logits(*indice)),
        name="relu_logits")
    neg_abs_logits = akg.tvm.compute(
        shape,
        lambda *indice: akg.tvm.expr.Select(
            logits(*indice) < zero, logits(*indice),
            logits(*indice) * -1),
        name="neg_abs_logits")
    one_plus_exp = exp(neg_abs_logits) + akg.tvm.const(1, dtype=dtype)  # 1 + exp(-|x|)
    log_term = log(one_plus_exp)
    logits_mul_labels = mul(logits, labels)
    res = relu_logits - logits_mul_labels + log_term
    return res
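
# A NumPy sketch of the numerically stable form derived in the comments above,
# max(x, 0) - x*z + log(1 + exp(-|x|)) (illustrative, hypothetical name):
def _sigmoid_xent_np_reference(labels, logits):
    import numpy as np
    return (np.maximum(logits, 0) - logits * labels
            + np.log1p(np.exp(-np.abs(logits))))
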
def exp_ad(head, in_data):
    """
    Compute gradient of exp operator using automatic differentiate.

    Args:
        head (tvm.tensor.Tensor): Tensor of type float16, float32.
        in_data (tvm.tensor.Tensor): Tensor of type float16, float32.

    Returns:
        tvm.tensor.Tensor has the same shape as input.
    """

    # check validity of head
    vc_util.check_shape(head.shape)
    vc_util.ops_dtype_check(head.dtype, vc_util.DtypeForDavinci.ALL_FLOAT)
    exp_in_data = exp.exp(in_data)
    jacs = list(akg.differentiate(exp_in_data, [in_data], head))
    return jacs[0]
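
# Since d/dx exp(x) = exp(x), the AD result above should match head * exp(x);
# a NumPy sketch for cross-checking (illustrative, hypothetical name):
def _exp_grad_np_reference(head, in_data):
    import numpy as np
    return head * np.exp(in_data)
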
def expm1(data):
    """
    Calculate exp(x) - 1.

    Calculate \f$e^{x}-1\f$, where x is the input tensor and e is Euler's number.

    Args:
        data: Tensor.

    Returns:
        Tensor, has the same type and shape as data.
    """

    vc_util.ops_dtype_check(data.dtype, vc_util.DtypeForDavinci.ALL_FLOAT)

    output = akg.lang.cce.vadds(exp(data), -1)

    return output
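
# For cross-checking (illustrative): np.expm1 computes the same quantity, with
# better precision near zero than the exp-then-subtract form used above.
def _expm1_np_reference(x):
    import numpy as np
    return np.expm1(x)
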
def _before_res_compute(abs_data):
    """
    compute bessel_i1e for abs value of data less than or equal to 3.75

    Algrithm:
    t = x / 3.75
    I1(x) = e^-|x|*x*(0.5 + 0.87890594t^2 + 0.51498869t^4 + 0.15084934t^6
                    + 0.02658773t^8 + 0.00301532t^10 + 0.00032411t^12)
    """

    data = topi.multiply(abs_data, 1.0 / CONST_LIMIT)
    data_square = mul(data, data)
    # Horner evaluation of the even polynomial in t = x / CONST_LIMIT
    before_res = topi.multiply(data_square, ITR_BEFORE[LEN_BEFORE - 1])
    before_res = topi.add(before_res, ITR_BEFORE[LEN_BEFORE - 2])
    for iter_number in ITR_BEFORE[LEN_BEFORE - 3::-1]:
        before_res = mul(before_res, data_square)
        before_res = topi.add(before_res, iter_number)
    exp_value = exp(neg(abs_data))
    before_res = mul(before_res, exp_value)
    before_res = mul(before_res, abs_data)
    return before_res
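
# A NumPy sketch of the series above using the coefficients listed in the
# docstring, with CONST_LIMIT assumed to be 3.75 as stated there
# (illustrative, hypothetical helper name):
def _bessel_i1e_small_np_reference(abs_x):
    import numpy as np
    t_square = (abs_x / 3.75) ** 2
    coeffs = [0.5, 0.87890594, 0.51498869, 0.15084934,
              0.02658773, 0.00301532, 0.00032411]
    poly = 0.0
    for c in reversed(coeffs):  # Horner scheme over t^2
        poly = poly * t_square + c
    return np.exp(-abs_x) * abs_x * poly
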
def _erf_compute(input_x):
    r"""
    Compute erf.

    .. math::
        \operatorname{erf}(x) = sign(x) \left(
            1 - (a_1t+a_2t^2+a_3t^3+a_4t^4+a_5t^5) e^{-x^2} + \epsilon(|x|)
            \right), \\
        t = \dfrac{1}{1+p|x|} \\
        \left|\epsilon(|x|)\right| \le 1.5 \times 10^{-7} \\
        \text{where}\; p=.3275911 \quad a_1=.254829592 \quad a_2=-.284496736 \\
        a_3=1.421413741 \quad a_4=-1.453152027 \quad a_5=1.061405429

    Args:
        input_x (tvm.tensor.Tensor): Input tensor.

    Returns:
        tvm.tensor.Tensor as rational approximation.
    """

    dtype = input_x.dtype
    shape = get_shape(input_x)

    cst_one = dc.one_const("float32")
    cst_neg_one = dc.neg_one_const("float32")
    cst_p = tvm.const(SCALER_P, "float32")
    cst_a1 = tvm.const(SCALER_A1, "float32")
    cst_a2 = tvm.const(SCALER_A2, "float32")
    cst_a3 = tvm.const(SCALER_A3, "float32")
    cst_a4 = tvm.const(SCALER_A4, "float32")
    cst_a5 = tvm.const(SCALER_A5, "float32")
    fp16_max = tvm.const(SCALER_FP16_MAX, "float32")
    fp16_min = tvm.const(SCALER_FP16_MIN, "float32")

    if dtype == "float16":
        input_x = topi.cast(input_x, "float32")

    # calculate: sign = round[(x*fp16max) / (|x*fp16max| + fp16min)]
    data_sign_vmuls = topi.multiply(input_x, fp16_max)
    data_sign_abs = topi.abs(data_sign_vmuls)
    data_adds = topi.add(data_sign_abs, fp16_min)
    data_sign_div = div(data_sign_vmuls, data_adds)
    data_round = round_value(data_sign_div)
    # mini device should cast to fp16 first
    if utils.product_is_mini():
        data_round = topi.cast(data_round, "float16")
    tensor_sign = topi.cast(data_round, "float32")

    # t = 1 / (1 + px)
    tensor_abs = topi.abs(input_x)
    one_plus_px = topi.add(cst_one, topi.multiply(tensor_abs, cst_p))
    data_t = div(topi.full(shape, "float32", 1.0), one_plus_px)

    # e^{-x^2}
    abs_square = topi.multiply(tensor_abs, tensor_abs)
    neg_square = topi.multiply(abs_square, cst_neg_one)
    exp_neg_square = exp(neg_square)

    # a1t + a2t^2 + a3t^3 + a4t^4 + a5t^5 = ((((a5t + a4)t + a3)t + a2)t + a1)t
    tmp_a5 = topi.multiply(cst_a5, data_t)
    tmp_a5a4 = topi.multiply(topi.add(tmp_a5, cst_a4), data_t)
    tmp_a5a4a3 = topi.multiply(topi.add(tmp_a5a4, cst_a3), data_t)
    tmp_a5a4a3a2 = topi.multiply(topi.add(tmp_a5a4a3, cst_a2), data_t)
    data_muladd = topi.multiply(topi.add(tmp_a5a4a3a2, cst_a1), data_t)

    # erf = sign(x) * (1 - data_muladd * e^{-x^2})
    erf_res = topi.multiply(
        tensor_sign,
        topi.add(
            cst_one,
            topi.multiply(cst_neg_one,
                          topi.multiply(data_muladd, exp_neg_square))))

    if dtype == "float16":
        erf_res = topi.cast(erf_res, dtype)

    return erf_res
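
# A NumPy sketch of the same rational approximation, using the constants from
# the docstring (illustrative reference, hypothetical helper name):
def _erf_np_reference(x):
    import numpy as np
    p, a1, a2, a3, a4, a5 = (0.3275911, 0.254829592, -0.284496736,
                             1.421413741, -1.453152027, 1.061405429)
    sign = np.sign(x)
    abs_x = np.abs(x)
    t = 1.0 / (1.0 + p * abs_x)
    poly = ((((a5 * t + a4) * t + a3) * t + a2) * t + a1) * t
    return sign * (1.0 - poly * np.exp(-abs_x * abs_x))
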
def Exp(x):
    """exp"""
    return exp.exp(x)