Example #1
def softmax_cross_entropy_with_logits(labels,
                                      logits,
                                      axis,
                                      reduction="mean",
                                      scale=1.0):
    # Numerically stable log-softmax: subtract the per-axis max before exp.
    max_logits = reduce_max(logits, axis, keepdims=True, target=utils.CCE)
    data_sub = sub(logits, max_logits, target=utils.CCE)
    akg.register_variables("minus_max", [logits], data_sub)
    data_exp = Exp(data_sub, target=utils.CCE)
    data_expsum = sum(data_exp, axis, keepdims=True, target=utils.CCE)
    data_expsum_log = log(data_expsum, target=utils.CCE)
    # log_softmax = (logits - max) - log(sum(exp(logits - max)))
    sub_value = sub(data_sub, data_expsum_log, target=utils.CCE)
    # cross_entropy = -labels * log_softmax
    neg_labels = neg(labels, target=utils.CCE)
    cross_entropy = mul(neg_labels, sub_value, target=utils.CCE)
    # backprop: prob - labels, where prob = softmax(logits)
    prob = Exp(sub_value, target=utils.CCE)
    backprop = sub(prob, labels, target=utils.CCE)

    if reduction.lower() == "none":
        loss = sum_v2(cross_entropy, axis, keepdims=True)
    elif reduction.lower() == "mean":
        loss = sum_v2(cross_entropy, axis=None)
        # Average over the batch dimension.
        factor = logits.shape[0].value
        loss = loss * akg.tvm.const(1 / factor, logits.dtype)
        backprop = backprop * akg.tvm.const(1 / factor, logits.dtype)
    elif reduction.lower() == "sum":
        loss = sum_v2(cross_entropy, axis=None)
    else:
        raise ValueError(
            "reduction method {0} is not supported".format(reduction))
    backprop = akg.topi.multiply(backprop,
                                 akg.tvm.const(scale, backprop.dtype))
    return loss, backprop
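
A minimal usage sketch, not from the original source: it assumes the AKG op helpers used above (reduce_max, sub, Exp, sum, log, neg, mul, sum_v2) and utils are already in scope, and that akg.tvm exposes the classic TVM placeholder API. Shapes, dtype, and axis are illustrative choices.

import akg

# Illustrative (batch, classes) placeholders; axis=1 reduces over classes.
labels = akg.tvm.placeholder((32, 10), name="labels", dtype="float16")
logits = akg.tvm.placeholder((32, 10), name="logits", dtype="float16")
loss, backprop = softmax_cross_entropy_with_logits(
    labels, logits, axis=1, reduction="mean")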
Example #2
def approximate_equal(x, y, tolerance=1e-5):
    """
    Check element-wise whether abs(x - y) is less than or equal to tolerance.

    Args:
        x (tvm.tensor.Tensor): Tensor of type float16 or float32.
        y (tvm.tensor.Tensor): Tensor of type float16 or float32.
        tolerance (float): Non-negative threshold. Defaults to 1e-5.

    Returns:
        tvm.tensor.Tensor. True where abs(x - y) is less than or equal to
        tolerance, False elsewhere.
    """

    if tolerance < 0:
        raise RuntimeError("tolerance should be >= 0")

    # check shape
    vc_util.check_shape(x)
    vc_util.check_shape(y)
    shape = get_shape(x)
    if shape != get_shape(y):
        raise RuntimeError("input shape must be same, but got %s vs %s", shape,
                           get_shape(y))

    # check input tensor data_type
    vc_util.ops_dtype_check(x.dtype, vc_util.DtypeForDavinci.ALL_FLOAT)
    vc_util.ops_dtype_check(y.dtype, vc_util.DtypeForDavinci.ALL_FLOAT)
    dtype = x.dtype
    if dtype != y.dtype:
        raise RuntimeError("input dtypes must be the same, but got %s vs %s"
                           % (dtype, y.dtype))

    res_vsub = sub(x, y)
    res_vabs = abs_value(res_vsub)

    # The vcmp_lt and vsel instructions do not support fp32 on the mini
    # product, so fall back to fp16 there (an auto-cast pass could hide this).
    if utils.product_is_mini():
        dtype = "float16"
        res_vabs = cast(res_vabs, dtype)

    t = akg.tvm.compute(shape, lambda *indice: akg.tvm.const(1, dtype), "t")
    f = akg.tvm.compute(shape, lambda *indice: akg.tvm.const(0, dtype), "f")
    res = akg.tvm.compute(
        shape, lambda *indice: akg.tvm.expr.Select(
            res_vabs[indice] <= akg.tvm.const(tolerance, dtype), t[indice], f[
                indice]))

    # This could be simplified if the cast op supported casting fp16/fp32
    # directly to bool.
    res_fp16 = cast(res, "float16")
    res_bool = akg.tvm.compute(
        shape, lambda *indice: res_fp16(*indice).astype("bool"))
    return res_bool
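
A short usage sketch (shapes and dtype here are illustrative assumptions, not from the source):

import akg

x = akg.tvm.placeholder((16, 16), name="x", dtype="float32")
y = akg.tvm.placeholder((16, 16), name="y", dtype="float32")
# res is a bool tensor: True wherever |x - y| <= 1e-5, element-wise.
res = approximate_equal(x, y, tolerance=1e-5)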
Example #3
def equal(input1, input2):
    """
    Check element-wise whether input1 equals input2.

    Args:
        input1 (tvm.tensor.Tensor): Input of type float16, float32, int32, int8 or uint8.
        input2 (tvm.tensor.Tensor): Input of the same shape and type as input1.

    Returns:
        tvm.tensor.Tensor. True where input1 equals input2, else False.
    """
    # check shapes
    shape1 = [x.value for x in input1.shape]
    shape2 = [x.value for x in input2.shape]
    shapes = [shape1, shape2]
    for shp in shapes:
        vc_util.check_shape(shp)

    vc_util.ops_dtype_check([input1.dtype, input2.dtype], [
        vc_util.DtypeForDavinci.ALL_FLOAT, vc_util.DtypeForDavinci.INT32,
        vc_util.DtypeForDavinci.INT8, vc_util.DtypeForDavinci.UINT8
    ])

    dtype = input1.dtype
    orig_dtype = dtype
    # The mini product only supports fp16 comparison.
    if utils.product_is_mini() and dtype != "float16":
        dtype = "float16"
    if (not utils.product_is_mini()) and dtype not in ("float16", "float32"):
        # For int32, casting to float16 may overflow, so use float32 instead.
        dtype = "float32"

    if orig_dtype == "float32" and dtype == "float16":
        input_sub = sub(input1, input2)
        input_sub = cast(input_sub, dtype)
        zero = akg.tvm.const(0.0, dtype)
        res = akg.topi.equal(input_sub, zero)
    else:
        input1 = cast(input1, dtype)
        input2 = cast(input2, dtype)
        res = akg.topi.equal(input1, input2)
    return res
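
A short usage sketch (the int32 shapes are illustrative assumptions):

import akg

a = akg.tvm.placeholder((8, 128), name="a", dtype="int32")
b = akg.tvm.placeholder((8, 128), name="b", dtype="int32")
# On non-mini targets the int32 inputs are compared in float32, per the
# overflow note above.
res = equal(a, b)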
Example #4
def Sub(x, y):
    """Element-wise subtraction: returns x - y."""
    return sub.sub(x, y)
Example #5
def mul_sub_mutioutput(first_input, second_input, third_input):
    """Compute first_input * second_input - third_input, returning both the
    intermediate product and the final result."""
    temp = mul.mul(first_input, second_input)
    output = sub.sub(temp, third_input)
    return [temp, output]
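
A short usage sketch (hypothetical shapes/dtype): returning a list lets the caller keep the intermediate product as an extra kernel output.

import akg

a = akg.tvm.placeholder((64,), name="a", dtype="float16")
b = akg.tvm.placeholder((64,), name="b", dtype="float16")
c = akg.tvm.placeholder((64,), name="c", dtype="float16")
temp, output = mul_sub_mutioutput(a, b, c)  # temp = a*b, output = a*b - c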
Example #6
def sub_ad(head, a, b):
    """Adjoint of sub with respect to a: differentiate output = a - b
    against a, seeded with the incoming gradient head."""
    output = sub.sub(a, b)
    _jacs = list(akg.differentiate(output, [a], head))
    return _jacs[0]
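
A short usage sketch (shapes are illustrative): since d(a - b)/da is the identity, the adjoint seeded with head is element-wise equal to head itself.

import akg

a = akg.tvm.placeholder((4, 4), name="a", dtype="float32")
b = akg.tvm.placeholder((4, 4), name="b", dtype="float32")
head = akg.tvm.placeholder((4, 4), name="head", dtype="float32")
grad_a = sub_ad(head, a, b)  # element-wise equal to head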
Example #7
def mul_sub(first_input, second_input, third_input):
    """Compute first_input * second_input - third_input."""
    temp = mul.mul(first_input, second_input)
    output = sub.sub(temp, third_input)
    return output
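
For completeness, a sketch of lowering the fused compute with the generic TVM-style schedule API (an assumption on my part; AKG's own build utilities may differ):

import akg

a = akg.tvm.placeholder((64,), name="a", dtype="float16")
b = akg.tvm.placeholder((64,), name="b", dtype="float16")
c = akg.tvm.placeholder((64,), name="c", dtype="float16")
out = mul_sub(a, b, c)
# Create a default schedule and print the lowered IR for inspection.
s = akg.tvm.create_schedule(out.op)
print(akg.tvm.lower(s, [a, b, c, out], simple_mode=True))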