Example #1
def case_1(data_shape, dtype, kernel_name, attrs):
    """elemwise chain case 1"""
    vc_util.ops_dtype_check(dtype, vc_util.DtypeForDavinci.FLOAT16)
    vc_util.check_shape_length_equal("data", data_shape, 2)

    m, k = data_shape

    A = akg.tvm.placeholder((m, k), name='A', dtype=dtype)
    B = akg.tvm.placeholder((k, ), name='B', dtype=dtype)
    C = akg.tvm.placeholder((m, k), name='C', dtype=dtype)

    E = akg.tvm.compute((m, k),
                        lambda i, j: A[i, j] * (B[j] + C[i, j]),
                        name="E")

    forward_s = akg.tvm.create_schedule(E.op)
    op_vars = [A, B, C, E]
    forward_low = akg.lower(forward_s,
                            op_vars,
                            simple_mode=True,
                            polyhedral=True)

    kernel_name = utils.gen_name_kernel(kernel_name, dtype, data_shape)

    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod = akg.build(forward_s,
                        op_vars,
                        "cce",
                        name="test",
                        attrs=attrs,
                        polyhedral=True)
        source_code = mod.imported_modules[0].get_source()
        return mod
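
A minimal NumPy reference for the elementwise chain above (E[i, j] = A[i, j] * (B[j] + C[i, j])) may help when validating the generated kernel; the helper name below is illustrative, not part of the original test.

import numpy as np

def case_1_expect(a, b, c):
    # b has shape (k,) and broadcasts along the row axis, matching B[j] in the compute
    return a * (b + c)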
Example #2
def add_b_conv(fmap_shape, filter_shape, pad_, stride_, dilation_,
               tile_hh=0, tile_coco=0, tile_mm=0, tile_kk=0, tile_nn=0, bypass_l1=False,
               use_bias=False, block_size=16, conv_dtype='float16'):
    conv, a_value, b_value, bias_value, kernel_name, dim_info = add_b_conv_compute(fmap_shape, filter_shape, pad_, stride_, dilation_,
                                                                                   tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, bypass_l1,
                                                                                   use_bias, block_size, conv_dtype)
    # schedule
    s = akg.tvm.create_schedule(conv.op)
    print(conv, a_value, b_value, bias_value)

    attrs = {}
    attrs["pragma_reschedule"] = True
    attrs["pragma_rmselfdep"] = False
    attrs['dim'] = dim_info
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):

        if use_bias:
            mod = akg.build(s, [a_value, b_value, bias_value, conv], "cce", name=kernel_name, attrs=attrs, polyhedral=True)
        else:
            mod = akg.build(s, [a_value, b_value, conv], "cce", name=kernel_name, attrs=attrs, polyhedral=True)
    source_code = mod.imported_modules[0].get_source()
    cce_path = '.'
    utils.create_code(kernel_name, cce_path, source_code)

    return mod
Example #3
def Gather(params_shape,
           indices_shape,
           params_dtype,
           indices_dtype,
           axis,
           kernel_name,
           cce_path="./",
           target=utils.CCE):
    """Gather data by indices"""
    utils.check_shape(params_shape, length=2)
    utils.check_shape(indices_shape, length=1)
    utils.ops_dtype_check(params_dtype, utils.DtypeForDavinci.ALL_TYPES)
    utils.ops_dtype_check(indices_dtype, utils.DtypeForDavinci.INT32)
    utils.check_equal("axis", "zero", axis, 0)

    # construct compute
    o_shape = (indices_shape[0], params_shape[1])
    xx = akg.tvm.placeholder(params_shape, dtype=params_dtype, name="X")
    yy = akg.tvm.placeholder(indices_shape, dtype=indices_dtype, name="Y")
    res = akg.tvm.extern(o_shape, [xx, yy],
                         lambda ins, outs: kernel_ir(outs[0], ins[0], ins[1]),
                         name="res",
                         dtype=params_dtype)
    s = akg.tvm.create_schedule(res.op)

    # create cce
    attrs = {"enable_multicore": False}
    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s, [xx, yy, res], "cce", name=kernel_name, attrs=attrs)

    source_code = mod.imported_modules[0].get_source()
    create_code(kernel_name, cce_path, source_code)

    return mod
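
Since axis is constrained to 0 and params is 2-D, a NumPy reference for checking the kernel output is simply row gathering; the arrays below are illustrative.

import numpy as np

params = np.random.rand(32, 16).astype(np.float16)
indices = np.random.randint(0, 32, size=(8,)).astype(np.int32)
expect = params[indices]  # shape (8, 16) == (indices_shape[0], params_shape[1])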
Example #4
def globalavgpool(n, c, h, w, pool_type, attrs, kernel_name="global_pool"):
    """
    Performs the global average pooling on the input. For each feature map we can define the formula as:
    \f[
     res = \frac{1}{W * H} \sum_{i,j} X_{i,j}
    \f]
    Note:
        The real input is created by akg.tvm.placeholder.
    Args:
        n (int): input batch size.
        c (int): input channel.
        h (int): input height.
        w (int): input width.
        pool_type (str): pooling mode, default average.
        attrs (dict): build attributes. Default None.
        kernel_name (str): kernel name.

    Returns:
            tvm.tensor.Tensor of shape n * c * 1 * 1
    """

    input = akg.tvm.placeholder((n, c, h, w), name='input', dtype="float16")
    output = akg.topi.nn.global_pool(input, pool_type=pool_type)
    s = akg.tvm.create_schedule(output.op)
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s, [input, output],
                        "cce",
                        name=kernel_name,
                        attrs=attrs,
                        polyhedral=True)
    return mod
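
A NumPy sketch of the expected result, assuming pool_type='avg': the output keeps N and C and averages over H and W.

import numpy as np

x = np.random.rand(2, 4, 8, 8).astype(np.float16)
expect = x.mean(axis=(2, 3), keepdims=True)  # shape (2, 4, 1, 1)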
Example #5
def logsoftmax_ad(shape, dtype, axis, kernel_name, attrs):
    """Compute the gradient of logsoftmax by autodiff."""
    check_list = ["float16"]
    if not dtype.lower() in check_list:
        raise RuntimeError("logsoftmax test only support %s while dtype is %s" % (",".join(check_list), dtype))
    # check_shape(shape)
    if axis < 0:
        axis = len(shape) + axis
    if axis >= len(shape):
        raise RuntimeError("axis should be less than dimension")
    if axis != len(shape) - 1:
        raise RuntimeError("Only support the last axis currently")

    shape_new = [shape[-2], shape[-1]]
    if len(shape) > 2:
        for i in range(len(shape) - 2):
            shape_new[0] = shape_new[0] * shape[i]
    shape = shape_new

    a_up = akg.tvm.placeholder(shape, dtype=dtype, name="input")
    b_up = logsoftmax.logsoftmax_op(a_up, shape, axis)

    head = akg.tvm.placeholder(b_up.shape, name="head", dtype=dtype)
    _jacs = list(akg.differentiate(b_up, [a_up], head))
    sjac = akg.tvm.create_schedule([_jacs[0].op])
    sjac[_jacs[0].op.input_tensors[1]].compute_inline()
    op_vars = [head, a_up, _jacs[0]]

    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod = akg.build(sjac, op_vars, "cce", name="test2", attrs=attrs, polyhedral=True)
        return mod
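
For reference, the closed-form gradient that the autodiff above should reproduce for a last-axis log-softmax is dX = head - softmax(X) * sum(head, axis=-1). A NumPy sketch (shapes assumed, last-axis reduction):

import numpy as np

def logsoftmax_grad_expect(x, head):
    # softmax along the last axis, computed stably
    e = np.exp(x - x.max(axis=-1, keepdims=True))
    p = e / e.sum(axis=-1, keepdims=True)
    return head - p * head.sum(axis=-1, keepdims=True)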
Example #6
def focalloss_ad_run2(shape, dtype, attrs):
    logits_pld = akg.tvm.placeholder(shape, dtype=dtype, name='logits')
    labels_pld = akg.tvm.placeholder(shape, dtype='int32', name='labels')
    d_labels, d_logits, head = focalloss_ad.focalloss_ad(
        labels_pld, logits_pld)
    print("autodiff d_logits:\n", akg.tvm.PrintTensorRecursively(d_logits))
    print("autodiff d_labels:\n", akg.tvm.PrintTensorRecursively(d_labels))

    # build autodiff kernels
    io = [labels_pld, logits_pld, head, d_labels, d_logits]
    s = akg.tvm.create_schedule([e.op for e in io])
    kernel_name = utils.gen_name_kernel("focalloss_ad", dtype, (
        shape[0],
        shape[1],
    ))
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s,
                        io,
                        "cce",
                        name=kernel_name,
                        attrs=attrs,
                        polyhedral=True)

    batchsize = shape[0]  # assumption: size the 1-D test buffers from the leading dimension
    labels_np = RANGEFILL((batchsize, ))
    logits_np = RANGEFILL((batchsize, ), 2)
    head_np = RANGEFILL((batchsize, ), 2)
    output = np.full(shape, np.nan, dtype)
    output = utils.mod_launch(mod, (labels_np, logits_np, head_np, output),
                              expect=output)
    expect = output  # hack: no independent reference result is computed here

    return (labels_np, logits_np, head_np), output, expect, compare_tensor(output,
                                                                           expect,
                                                                           atol=0.1)
Example #7
def op_build_to_func(opnames, computes, args, custom_schedule, device, kernel_name, attrs):
    """op_build_to_func"""
    if device not in ("aicore", "aicpu"):
        logging.error("Device %s is not in [aicore, aicpu].", device)
        return None

    polyhedral = True
    dump_ir = os.getenv(MS_AKG_DUMP_IR) == "on"

    try:
        tmp_outputs = [x.op for x in computes]
        s = akg.tvm.create_schedule(tmp_outputs)
        if custom_schedule:
            polyhedral = False
            custom_schedule(s)

        with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=dump_ir):
            if attrs:
                binds = attrs.pop(BINDS, None)
                rst = akg.build_to_func(s, args, name=kernel_name, attrs=attrs, polyhedral=polyhedral,
                                        binds=binds, target=device)
            else:
                rst = akg.build_to_func(s, args, name=kernel_name, polyhedral=polyhedral, target=device)

    except Exception:
        logging.error(traceback.format_exc())
        return None
    return rst
Example #8
def matmul_ad(data_shape, weight_shape, dtype, attrs=None):
    check_list = ["float16"]
    if not (dtype.lower() in check_list):
        raise RuntimeError("matmul test only support %s while dtype is %s" %
                           (",".join(check_list), dtype))
    # check_shape(shape)
    assert (len(data_shape) == 2)
    assert (len(weight_shape) == 2)
    assert (data_shape[1] == weight_shape[0])

    m, k = data_shape
    _, n = weight_shape

    a = akg.tvm.placeholder((m, k), name='a', dtype=dtype)
    b = akg.tvm.placeholder((k, n), name='b', dtype=dtype)
    kk = akg.tvm.reduce_axis((0, k), name='kk')
    c = akg.tvm.compute(
        (m, n),
        lambda i, j: akg.lang.ascend.mmad(a[i, kk] * b[kk, j], axis=kk),
        name="c")

    head = akg.tvm.placeholder(c.shape, name="Head", dtype='float16')
    _jacs = list(akg.differentiate(c, [a], head))
    sjac = akg.tvm.create_schedule([_jacs[0].op])
    op_vars = [head, b, _jacs[0]]

    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        mod = akg.build(sjac,
                        op_vars,
                        "cce",
                        name="test2",
                        attrs=attrs,
                        polyhedral=True)
        return mod
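
The Jacobian-vector product built above has the well-known closed form dA = head @ B^T for C = A @ B, which is why op_vars only needs head and b. A NumPy check might look like:

import numpy as np

def matmul_grad_expect(head, b):
    # for C = A @ B, dL/dA = head @ B^T (cast to float32 to avoid fp16 accumulation error)
    return head.astype(np.float32) @ b.astype(np.float32).T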
Example #9
def test_001():
    shape = (1, 256, 8)
    topk = int(32)
    score_threshold = float(0)
    dtype = "float16"
    kernel_name = "cce_proposal_sort_fp16"
    attrs = None

    data_np, expect = np_proposal_sort(shape, topk, score_threshold)
    output = np.full(expect.shape, 0, dtype)

    data = akg.tvm.placeholder(shape, dtype, "input_1")
    out = proposal_sort.proposal_sort(data, topk, score_threshold)

    s = akg.tvm.create_schedule(out.op)
    with akg.build_config(add_lower_pass=[(0, akg.tvm.ParseHalideIRFromCode)],
                          dump_pass_ir=False):
        mod = akg.build(s, [data, out],
                        "cce",
                        name="proposal_sort",
                        polyhedral=True)
    output = utils.mod_launch(mod, (data_np, output))
    test_case_result = compare_tensor(output,
                                      expect,
                                      rtol=5e-03,
                                      equal_nan=True)
    assert (test_case_result)
    print(" ========== PARSER PASSED ============")
Example #10
File: topk.py  Project: zhuyawen/akg
def topk(shape, k, dtype, kernel_name, attrs):
    check_list = ["float16", "int32"]
    if not (dtype.lower() in check_list):
        raise RuntimeError("tile_cce only support %s while dtype is %s" %
                           (",".join(check_list), dtype))
    if k > shape[-1]:
        raise RuntimeError("k should not be greater than shape[-1]")

    shape = (16, 16)
    out_shape = (16, 16)
    temp_shape = (16, 16 * 18)
    inputs = akg.tvm.placeholder(shape, name="input", dtype="float16")
    output = akg.tvm.placeholder(out_shape, name="output", dtype="float16")
    temp = akg.tvm.placeholder(temp_shape, name="temp", dtype="float16")

    values = compute_topk(output, inputs, temp)
    values1 = compute_get_last(values, temp)

    s = akg.tvm.create_schedule([values1.op])
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s, [inputs, values1],
                        "cce",
                        name=kernel_name,
                        attrs=attrs,
                        polyhedral=True)
        return mod
Example #11
def invert_permutation_run(shape, dtype, attrs):
    # check shapes
    vc_util.check_shape(shape)

    if not (dtype.lower() in "int32"):
        raise RuntimeError(
            "indices_dtype only support int32 while dtype is %s" % dtype)

    A = akg.tvm.placeholder(shape, dtype, name="A")
    op = invert_permutation.invert_permutation(A)
    s = akg.tvm.create_schedule(op.op)

    kernel_name = utils.gen_name_kernel("invert_permutation", dtype, shape)
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s, [A, op],
                        "cce",
                        name=kernel_name,
                        attrs=attrs,
                        polyhedral=True)

    input_data = np.random.permutation(np.arange(shape[0])).astype(np.int32)
    expect = np.full([shape[0]], 0, np.int32)
    for i, e in enumerate(input_data):
        expect[e] = i

    output = np.full([shape[0]], 0, np.int32)
    output = utils.mod_launch(mod, (input_data, output), expect=expect)

    return (input_data, ), output, expect, compare_tensor(output,
                                                          expect,
                                                          rtol=5e-03,
                                                          equal_nan=True)
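
The expect loop above builds the inverse permutation explicitly; np.argsort of the permutation gives the same result and can serve as a cross-check.

import numpy as np

perm = np.random.permutation(np.arange(8)).astype(np.int32)
inv = np.argsort(perm)                      # inverse permutation
assert np.array_equal(perm[inv], np.arange(8))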
Example #12
def test_CCE_Conv(FMap_shape, Filter_shape, Pad, Stride,
                  Tile_h=0, Tile_co=0, Tile_m=0, Tile_k=0, Tile_n=0,
                  use_bias=False, fp32_mad = True, kernel_name="conv"):

    # adjust to TilingApi
    # feature map (NCHW -> NC1HWC0)
    fmap_n, fmap_c, fmap_h, fmap_w = FMap_shape
    fmap_shape_NC1HWCO = (fmap_n, fmap_c // block_size, fmap_h, fmap_w, block_size)

    # filter (NCHW -> C1HWNC0)
    filter_n, filter_c, filter_h, filter_w = Filter_shape
    filter_shape_C1HWNC0 = (filter_c // block_size, filter_h, filter_w, filter_n, block_size)
    # filter (C1HWNC0 -> filter_fractal)
    filter_shape_fractal = (
        filter_c * filter_h * filter_w // block_size, filter_n // block_size, block_size, block_size)

    # stride (stride_h, stride_w)
    stride = Stride

    # fmap_placeholder (NC1HWCO)
    fmap_placeholder = akg.tvm.placeholder(fmap_shape_NC1HWCO, dtype=conv_dtype, name='fmap')
    # filter_placeholder (fractal)
    filter_placeholder = akg.tvm.placeholder(filter_shape_fractal, dtype=conv_dtype, name='filter')

    if use_bias:
        bias_shape = (1, filter_n // block_size, 1, 1, block_size)
        bias_placeholder = akg.tvm.placeholder(bias_shape, dtype= conv_dtype, name='bias')
        conv_dsl_input = (fmap_placeholder, filter_placeholder, bias_placeholder)
    else:
        conv_dsl_input = (fmap_placeholder, filter_placeholder)

    conv_dsl_outputs = conv_dsl(conv_dsl_input, fmap_shape_NC1HWCO, filter_shape_C1HWNC0, Pad, stride, use_bias, fp32_mad)

    # calculate the tiling factor.
    Wo = (fmap_w + Pad[2] + Pad[3] - filter_w) // (stride[1]) + 1
    H_tiling = (Tile_h - filter_h) // (stride[0]) + 1

    # To adjust to TilingApi, the tiling factors need some conversion.
    # tiling_factor_h applies in L1; Tile_h refers to the h in 'nchw', so it is translated to H_tiling,
    # which is used as Ho in A_im2col_row_major_shape.
    # The other factors are similar: each is converted to the format used where it is applied.
    tiling_factor_h = H_tiling * Wo // block_size * block_size
    tiling_factor_co = Tile_co // block_size
    tiling_factor_m = Tile_m // block_size * block_size
    tiling_factor_n = Tile_n // block_size
    tiling_factor_k = Tile_k // block_size

    # schedule
    # pick the last one as the final result
    s = akg.tvm.create_schedule(conv_dsl_outputs[-1].op)


    conv_sch(s, (conv_dsl_input, conv_dsl_outputs), tiling_factor_h=tiling_factor_h,
             tiling_factor_m=tiling_factor_m, tiling_factor_k=tiling_factor_k, tiling_factor_n=tiling_factor_n)

    args = list(conv_dsl_input) + [conv_dsl_outputs[-1]]
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s, args, "cce", name=kernel_name, attrs={"loop_partition_unroll": True})
        return mod
Example #13
def roipool(shape,
            roibox,
            pooled_shape,
            dtype,
            kernel_name="roipool_forward_output",
            attrs=None,
            target="cce"):
    check_list = ["float16"]
    if not (dtype.lower() in check_list):
        raise RuntimeError("tile_cce only support %s while dtype is %s" %
                           (",".join(check_list), dtype))
    utils.check_shape(shape)
    assert (len(shape) == 4)
    assert (len(roibox) == 4)
    assert (len(pooled_shape) == 2)

    a_n, a_c, a_h, a_w = shape
    roi_t, roi_b, roi_l, roi_r = roibox
    assert (roi_t >= 0 and roi_t < roi_b and roi_b < a_h)
    assert (roi_l >= 0 and roi_l < roi_r and roi_r < a_w)

    a = akg.tvm.placeholder(shape, name="a", dtype=dtype)
    Crop = akg.tvm.compute([a_n, a_c, roi_b - roi_t, roi_r - roi_l],
                           lambda n, c, h, w: a[n, c, roi_t + h, roi_l + w])

    p_h, p_w = pooled_shape
    win_h = (roi_b - roi_t) // p_h + (1 if (roi_b - roi_t) % p_h > 0 else 0)
    win_w = (roi_r - roi_l) // p_w + (1 if (roi_r - roi_l) % p_w > 0 else 0)

    assert p_h <= (roi_b - roi_t) and p_w <= (roi_r - roi_l)

    Unpooled = akg.tvm.compute(
        [a_n, a_c, p_h, p_w, win_h, win_w],
        lambda n, c, h, w, wh, ww: akg.tvm.expr.Select(
            akg.tvm.all(h * win_h + wh < roi_b - roi_t, w * win_w + ww < roi_r
                        - roi_l), Crop[n, c, h * win_h + wh, w * win_w + ww],
            akg.tvm.const(0, a.dtype)))

    rh = akg.tvm.reduce_axis((0, win_h))
    rw = akg.tvm.reduce_axis((0, win_w))
    output_shape = [a_n, a_c, p_h, p_w]
    res = akg.tvm.compute(
        output_shape,
        lambda n, c, h, w: akg.tvm.max(Unpooled[n, c, h, w, rh, rw],
                                       axis=[rh, rw]))
    s = akg.tvm.create_schedule(res.op)
    s[Crop].compute_inline()
    s[Unpooled].compute_inline()
    kernel_name = utils.gen_name_kernel(kernel_name, dtype, shape)
    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s, [a, res],
                        "cce",
                        name=kernel_name,
                        attrs=attrs,
                        polyhedral=True)
        return mod, output_shape
Example #14
def div_mod_issue(data_shape, weight_shape, case_number):

    if (case_number == 0):
        A = akg.tvm.placeholder(data_shape, dtype='float16', name='input0')
        divisor = 2
        stage1 = akg.tvm.compute(
            data_shape,
            lambda n, c, h, w: A[n, c / divisor, h, w] + 1,
            name="stage1")
        op_vars = [A, stage1]
        s = akg.tvm.create_schedule([stage1.op])
        akg.lower(s, op_vars, simple_mode=True, polyhedral=True)
        with akg.build_config(add_lower_pass=cce.debug_mode(0),
                              dump_pass_ir=True):
            mod = akg.build(s, op_vars, "cce", name="test1", polyhedral=True)
        return mod
    else:
        A = akg.tvm.placeholder(data_shape, dtype='float16', name='input0')
        B = akg.tvm.placeholder(weight_shape, dtype='float16', name='input1')

        divisor = 3
        stage1 = akg.tvm.compute(
            data_shape,
            lambda n, c, h, w: A[n, c / divisor, h, w] + 1,
            name="stage1")
        stage2 = akg.tvm.compute(
            weight_shape,
            lambda n, c, h, w: stage1[0, c, 0, 0] + B[n, c, h, w],
            name="stage2")
        op_vars = [A, B, stage2]

        s = akg.tvm.create_schedule([stage2.op])
        akg.lower(s, op_vars, simple_mode=True, polyhedral=True)

        with akg.build_config(add_lower_pass=cce.debug_mode(0),
                              dump_pass_ir=True):
            mod_stage2 = akg.build(s,
                                   op_vars,
                                   "cce",
                                   name="test2",
                                   polyhedral=True)
        return mod_stage2
Example #15
def my_dsl(dtype, kernel_name, attrs):
    m = tvm.var("M")
    n = tvm.var("N")
    A = tvm.placeholder((m, ), name="A", dtype=dtype)
    B = tvm.placeholder((m, ), name="B", dtype=dtype)

    if insn == "add":
        C = topi.add(A, B)
    elif insn == "sub":
        C = topi.subtract(A, B)
    if insn == "mul":
        C = topi.multiply(A, B)
    elif insn == "div":
        C = topi.divide(A, B)
    elif insn == "max":
        C = topi.maximum(A, B)
    elif insn == "min":
        C = topi.minimum(A, B)

    elif insn == "abs":
        C = tvm.compute(A.shape, lambda *index: tvm.abs(A(*index)), name='C')
    elif insn == "exp":
        C = topi.exp(A)
    elif insn == "log":
        C = topi.log(A)
    elif insn == "sqrt":
        C = topi.sqrt(A)
        C = topi.log(A)
    elif insn == "sqrt":
        C = topi.sqrt(A)

    elif insn == "adds":
        C = A + tvm.const(2, dtype)
    elif insn == "muls":
        C = A * tvm.const(2, dtype)

    # C = tvm.compute((m, ), lambda i: A[i] + B[i], name="C")
    s = tvm.create_schedule([C.op])
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        if insnType == "binary":
            mod = akg.build(s, [A, B, C],
                            "cce",
                            name=kernel_name,
                            attrs=attrs,
                            polyhedral=True)
        else:
            mod = akg.build(s, [A, C],
                            "cce",
                            name=kernel_name,
                            attrs=attrs,
                            polyhedral=True)
    return mod
Example #16
def fc(fMapBatch, weight, fc_dtype, block_size, attrs, kernel_name="Fully_Connected"):
    """
    Computes a fully connected (FC) layer.

    Args:
        fMapBatch(akg.tvm.Tensor): Should be a 4D tensor.
        weight(akg.tvm.Tensor): Should be a 4D tensor of same type as fMapBatch.
        fc_dtype(str): Specifies data type of input tensors.
        block_size(int): Block size.
        attrs(dicts): Attributes.
        kernel_name(str): Kernel name.

    Returns:
        akg.tvm.Tensor of same type as input tensors.
    """
    # NCHW
    f_n, f_c, f_h, f_w = fMapBatch.shape
    w_n, w_c, w_h, w_w = weight.shape

    if f_c != w_c or f_h != w_h or f_w != w_w or w_n < 32:
        raise RuntimeError("invalid input shape")
    f_shape_nc1hwc0 = (f_n, f_c // block_size, f_h, f_w, block_size)

    w_shape_fractal = (w_c // block_size * w_h * w_w, w_n // block_size, block_size, block_size)

    A = akg.tvm.placeholder(f_shape_nc1hwc0, dtype=fc_dtype, name='fmap')
    B = akg.tvm.placeholder(w_shape_fractal, dtype=fc_dtype, name='weight')

    out_shape_nc1hwc0 = (f_n, w_n // block_size, 1, 1, block_size)

    weight_shape_nc1hwc0 = (w_n, w_c // block_size, w_h, w_w, block_size)

    _, k_c1, k_h, k_w, k_c0 = weight_shape_nc1hwc0

    kc1 = akg.tvm.reduce_axis((0, k_c1), name='kc1')
    kh = akg.tvm.reduce_axis((0, k_h), name='kh')
    kw = akg.tvm.reduce_axis((0, k_w), name='kw')
    kc0 = akg.tvm.reduce_axis((0, k_c0), name='kc0')

    res = akg.tvm.compute(out_shape_nc1hwc0,
                      lambda n, c1, h, w, c0: akg.lang.ascend.mmad(
                          A[n, kc1, (h + kh), (w + kw), kc0]
                          * B[(kc1 * k_h + kh) * k_w + kw, c1, c0, kc0],
                          axis=[kc1, kh, kw, kc0]), name="res")

    s = akg.tvm.create_schedule(res.op)
    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s, [A, B, res], "cce", name=kernel_name, attrs=attrs, polyhedral=True)

    return mod
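
Setting aside the NC1HWC0/fractal layout transforms, the mmad above amounts to a fully connected layer over the flattened C*H*W axis. A NumPy reference in plain NCHW (illustrative, assumes matching spatial shapes as checked above):

import numpy as np

def fc_expect(fmap_nchw, weight_nchw):
    # out[n, k] = sum_{c,h,w} fmap[n, c, h, w] * weight[k, c, h, w]
    n = fmap_nchw.shape[0]
    k = weight_nchw.shape[0]
    return fmap_nchw.reshape(n, -1).astype(np.float32) @ weight_nchw.reshape(k, -1).astype(np.float32).T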
Example #17
def intrin_load3d(A_shape, strides, kernel_size, padding):


    _, _, _, _, c0_value = A_shape
    stride_h, stride_w = strides
    kernel_h, kernel_w = kernel_size
    pad_t, pad_b, pad_l, pad_r = padding

    l1_h = akg.tvm.var("l1_h", dtype='int32')
    l1_w = akg.tvm.var("l1_w", dtype='int32')
    # we know that the n-batch and C1 are fixed. The H and W of the piece of A are unknown.
    a = akg.tvm.placeholder((1, 1, l1_h, l1_w, c0_value), dtype=conv_dtype)
    fp_w = akg.tvm.var("fp_w")
    fp_h = akg.tvm.var("fp_h")
    fm_w = akg.tvm.var("fm_w")
    fm_h = akg.tvm.var("fm_h")
    fp_c1 = akg.tvm.var("fp_c1")
    pad_t = akg.tvm.var("pad_t")
    pad_b = akg.tvm.var("pad_b")
    l1_h_fmatrix = akg.tvm.var("l1_h_fmatrix")

    # Output will be of shape (block_size (window positions), C0)  = (16x16)

    c = akg.tvm.compute((block_size, c0_value), lambda *indices : manual_im2col_1repeat(indices, a, fp_w, fp_h, fm_w, fm_h,  pad_t, pad_b, l1_h_fmatrix, stride_w), name='im2col_manual')

    Ab_scope = "local.L1"
    Cb_scope = "local.L0A"

    Ab = akg.tvm.decl_buffer(a.shape, a.dtype,
                         name="Abuf",
                         offset_factor=1, scope=Ab_scope) #, strides=[akg.tvm.var("s1"), akg.tvm.var("s2"), akg.tvm.var("s3"), akg.tvm.var("s4"), akg.tvm.var("s5")])

    Cb = akg.tvm.decl_buffer(c.shape, c.dtype, name="Cbuf", offset_factor=1, scope=Cb_scope)

    def intrin_func(ins, outs, sp):
        aa = ins[0]
        dd = outs[0]
        def _body():
            ib = akg.tvm.ir_builder.create()
            ib.emit(akg.tvm.call_extern("int32", "cce_img2col_",
                                    dd.access_ptr("w"),
                                    aa.access_ptr("r"),
                                    # the constant params are dilation, jump offset, repeat-mode, # repeats, c0 mode
                                    sp[0], sp[1], sp[2], sp[3], sp[4], stride_w, stride_h, kernel_w, kernel_h, 1, 1, 1, 0, 1, 0, sp[5], sp[6], pad_l, pad_r, sp[7], l1_w))

            return ib.get()
        return _body()

    with akg.build_config(offset_factor=1):
        return akg.tvm.decl_tensor_intrin(c.op, intrin_func, binds={a: Ab, c: Cb}, scalar_params=[fp_w, fp_h, fm_w, fm_h, fp_c1, pad_t, pad_b, l1_h_fmatrix])
Example #18
def range_run(start, limit, delta, dtype, attrs):
    t_range = tvm_range.range_value(start, limit, delta, dtype)
    # Create module
    sch = akg.tvm.create_schedule(t_range.op)
    kernel_name = "range"
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod = akg.build(sch, [t_range], "cce", name=kernel_name, attrs=attrs, polyhedral=True)
        print(mod.imported_modules[0].get_source())
    # Generate data for testing the op
    expect = np.asarray(list(range(start, limit, delta)))

    output = np.full(expect.shape, np.nan, dtype)
    output = utils.mod_launch(mod, (output, ), expect=expect)

    return tuple(), output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
Example #19
def test_select():
    N = 128

    actual = akg.tvm.placeholder((N, ), name='actual', dtype='int32')
    predict = akg.tvm.placeholder((N, ), name='predict', dtype='int32')
    k = akg.tvm.reduce_axis((0, N), name='k')
    output = akg.tvm.compute(
        (N, N), lambda i, j: akg.tvm.sum(akg.tvm.expr.Select(
            akg.tvm.all(i == actual[k], j == predict[k]), 1.0, 0.0),
                                         axis=k))

    s = akg.tvm.create_schedule(output.op)

    # build the cce kernel
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s, [actual, predict, output], "cce", polyhedral=True)
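
The compute above builds an N x N confusion-matrix-style count: output[i, j] is the number of positions k where actual[k] == i and predict[k] == j. A NumPy reference for validation:

import numpy as np

def select_expect(actual, predict, n):
    out = np.zeros((n, n), dtype=np.float32)
    np.add.at(out, (actual, predict), 1.0)   # accumulate one count per sample
    return out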
Example #20
def concat_ad_run(shapes, dtype, axis, attrs):
    # prepare inputs placeholder
    inp_dtype = dtype.lower()
    data = []
    for i in range(len(shapes)):
        shape = shapes[i]
        data.append(
            akg.tvm.placeholder(shape, name="data_%d" % i, dtype=inp_dtype))

    kernel_name = utils.genKernelName("concat", inp_dtype, shapes)
    res, head = concat_ad.concat_ad(data, axis)

    opvars = [head] + data + [res]
    s = akg.tvm.create_schedule(res.op)
    op_attrs = [axis]

    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(concat_ad.concat_ad, [shapes],
                                  [dtype.lower()],
                                  op_attrs,
                                  kernel_name=kernel_name,
                                  attrs=attrs,
                                  tuning=t)
        if t:
            args, expect, head_data, inputs = gen_data(dtype, head, shapes)
            return mod, expect, tuple(args)
        else:
            return mod
    else:
        # build the cce kernel
        with akg.build_config(add_lower_pass=utils.debug_mode(0),
                              dump_pass_ir=True):
            mod = akg.build(s,
                            opvars,
                            "cce",
                            name=kernel_name,
                            attrs=attrs,
                            polyhedral=True)
        print(mod.imported_modules[0].get_source())

        args, expect, head_data, inputs = gen_data(dtype, head, shapes)
        output = utils.mod_launch(mod, tuple(args), expect=expect)
        return tuple(inputs) + (head_data, ), output, expect, compare_tensor(
            output, expect, rtol=5e-03, equal_nan=True)
Example #21
def floormod(shape, dtype, kernel_name, attrs):
    """
    Compute element-wise remainder of division.
    \f$res=a - floor(a/b) * b\f$

    Args:
         shape (list): shape of the input tensors.
         dtype (str): data type of the inputs.
         kernel_name (str): kernel name.
         attrs (dict): build attributes. Default None.
    Returns:
            the built cce kernel module.
    """

    vc_util.ops_dtype_check(
        dtype,
        [vc_util.DtypeForDavinci.ALL_FLOAT, vc_util.DtypeForDavinci.INT32])
    vc_util.check_shape(shape)

    a = akg.tvm.placeholder(shape=shape, name="a", dtype=dtype)
    b = akg.tvm.placeholder(shape=shape, name="b", dtype=dtype)

    # res = a - floor(a/b) * b
    # Newton's Method for VREC
    para = akg.lang.cce.vrec(b)
    for _ in range(3):
        tmp1 = akg.lang.cce.vmul(b, para)
        tmp2 = akg.lang.cce.vmuls(tmp1, -1)
        tmp3 = akg.lang.cce.vadds(tmp2, 2)
        para = akg.lang.cce.vmul(tmp3, para)

    c = akg.lang.cce.vmul(a, para)
    d = akg.lang.cce.floor(c)
    e = akg.lang.cce.vmul(d, b)
    res = akg.lang.cce.vsub(a, e)

    s = akg.tvm.create_schedule(res.op)

    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s, [a, b, res],
                        "cce",
                        name=kernel_name,
                        attrs=attrs,
                        polyhedral=True)
        return mod
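
The vrec/vmul sequence above refines the reciprocal of b with three Newton iterations (r <- r * (2 - b * r)) before forming a - floor(a/b) * b. A NumPy sketch of the same arithmetic:

import numpy as np

def floormod_expect(a, b):
    r = 1.0 / b                      # seed reciprocal (plays the role of vrec)
    for _ in range(3):               # Newton refinement: r <- r * (2 - b * r)
        r = r * (2.0 - b * r)
    return a - np.floor(a * r) * b   # equals np.mod(a, b) up to reciprocal precision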
Example #22
def elemwise_sum_manual_schedule(input_shape, polyhedral=False, attrs=None):
    """manually schedule"""
    b = akg.tvm.placeholder(input_shape, dtype='float16', name="b")
    c = akg.tvm.placeholder(input_shape, dtype='float16', name="c")
    a = akg.tvm.compute(input_shape, lambda *indices: b(*indices) + c(*indices))
    ss = akg.tvm.create_schedule([a.op])
    ss.cache_read(b, "local.UB", [a])
    ss.cache_read(c, "local.UB", [a])
    ss.cache_write(a, "local.UB")
    ss[a].set_scope("local.UB")
    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        mod = akg.build(ss,
                    [b, c, a],
                    "cce",
                    name="test_manual_schedule",
                    attrs=attrs,
                    polyhedral=polyhedral)
    return mod
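
A possible usage sketch, assuming the same mod_launch helper used by the other examples in this listing:

import numpy as np

mod = elemwise_sum_manual_schedule((16, 16))
b_np = np.random.rand(16, 16).astype(np.float16)
c_np = np.random.rand(16, 16).astype(np.float16)
out = np.zeros((16, 16), np.float16)
out = utils.mod_launch(mod, (b_np, c_np, out))
assert np.allclose(out, b_np + c_np, atol=1e-3)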
Example #23
def test_quant(fmap_shape):
    # input shape(NCHW -> NC1HWC0)
    in_n, in_c, in_h, in_w = fmap_shape
    assert in_c % 32 == 0
    input_shape_nc1hwc0 = (in_n, in_c // 16, in_h, in_w, 16)
    in_n, in_c1, in_h, in_w, in_c0 = input_shape_nc1hwc0

    # placeholder (NC1HWC0)
    FMap = akg.tvm.placeholder(input_shape_nc1hwc0,
                               dtype='float16',
                               name='FMap')

    ScaleQ = akg.tvm.placeholder((16, ), dtype='float16', name='ScaleQ')
    OffsetQ = akg.tvm.placeholder((16, ), dtype='float16', name='OffsetQ')

    out_shape_nc1hwc0 = (in_n, in_c // 32, in_h, in_w, 32)
    print(out_shape_nc1hwc0)
    out_n, out_c1, out_h, out_w, out_c0 = out_shape_nc1hwc0

    # quantize
    Quant = akg.tvm.compute(out_shape_nc1hwc0,
                            lambda n, c1, h, w, c0:
                            (FMap[n, c1 + c0 // 16, h, w, c0 % 16] * ScaleQ[0]
                             + OffsetQ[0]).astype('int8'),
                            name='output')

    info = dim.Dim()
    info.setdim(index=0, axis=0, tilel1=2, tilel0=0)
    info.setdim(index=0, axis=0, tilel1=32, tilel0=0)
    info.setdim(index=0, axis=0, tilel1=32, tilel0=0)
    info.setdim(index=0, axis=0, tilel1=16, tilel0=0)

    # schedule
    s = akg.tvm.create_schedule(Quant.op)
    with akg.build_config(add_lower_pass=utils.debug_mode(0),
                          dump_pass_ir=True):
        mod = akg.build(s, [FMap, ScaleQ, OffsetQ, Quant],
                        'cce',
                        name='cce_quant',
                        attrs={'dim': str(info)},
                        polyhedral=True)

    source_code = mod.imported_modules[0].get_source()
    print(source_code)
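
Per element, the Quant compute applies y = int8(x * ScaleQ[0] + OffsetQ[0]) while regrouping C0=16 input blocks into C0=32 output blocks. A NumPy sketch of the scalar math (layout handling omitted):

import numpy as np

x = np.random.rand(8).astype(np.float16)
scale, offset = np.float16(0.5), np.float16(1.0)
expect = (x * scale + offset).astype(np.int8)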
Example #24
def test_vmadd():
    shape = (10, 256)
    dtype = 'float16'

    x = akg.tvm.placeholder(shape, name="x", dtype=dtype)

    def compute_func(*indices):
        y = x(*indices) + akg.tvm.const(2.0, dtype)
        return y * x(*indices) + x(*indices) + akg.tvm.const(1.0, dtype)

    res = akg.tvm.compute(shape, compute_func)

    s = akg.tvm.create_schedule(res.op)

    # build the cce kernel
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s, [x, res], "cce", polyhedral=True)

    assert "vmadd" in mod.imported_modules[0].get_source()
Example #25
def conv_relu(fmap_shape,
              filter_shape,
              pad_,
              stride_,
              dilation_,
              tile_hh=0,
              tile_coco=0,
              tile_mm=0,
              tile_kk=0,
              tile_nn=0,
              bypass_l1=False,
              use_bias=False,
              block_size=16,
              conv_dtype='float16'):
    conv, a_value, b_value, bias_value, kernel_name, dim_info = add_a_conv_compute(
        fmap_shape, filter_shape, pad_, stride_, dilation_, tile_hh, tile_coco,
        tile_mm, tile_kk, tile_nn, bypass_l1, use_bias, block_size, conv_dtype)
    # leaky relu
    negative_slope = 0.0
    slope_tmp = akg.tvm.const(negative_slope, dtype=conv_dtype)
    # negative_slope*x
    out = akg.lang.ascend.vmuls(conv, slope_tmp)
    # max(x,negative_slope*x)
    out = akg.lang.ascend.vmax(out, conv)
    # schedule
    s = akg.tvm.create_schedule(conv.op)
    with akg.build_config(add_lower_pass=utils.debug_mode(0),
                          dump_pass_ir=True):

        if use_bias:
            mod = akg.build(s, [a_value, b_value, bias_value, conv],
                            "cce",
                            name=kernel_name,
                            attrs={"dim": dim_info},
                            polyhedral=True)
        else:
            mod = akg.build(s, [a_value, b_value, conv],
                            "cce",
                            name=kernel_name,
                            attrs={"dim": dim_info},
                            polyhedral=True)
    return mod
Example #26
def gen_spaces_dim_key(op_func, s, op_var, kernel_name, attrs, polyhedral,
                       tuning, target):
    """
    Generate tiling parameter.

    Args:
        op_func (function returning an op or (op, [op_vars])): The op build function.
        s (dict): schedule of op.
        op_var (list): the akg.tvm.tensor of inputs and outputs for op.
        kernel_name (str): name of op.
        attrs (dict): tiling parameter.
        polyhedral (bool): True by default.
        tuning (bool): False by default.

    Return:
        tiling parameter.
    """
    set_dim_key = ""
    if op_func.__name__ in ct_util.set_dim_func_map.keys():
        func_ = ct_util.set_dim_func_map[op_func.__name__]
        if inspect.isfunction(func_):
            set_dim_key = func_(*args)[1]
    elif op_func.__name__ in ct_util.gen_key_func_map.keys():
        func_ = ct_util.gen_key_func_map[op_func.__name__]
        if inspect.isfunction(func_):
            set_dim_key = func_(*args)
    with akg.build_config(dump_pass_ir=True):
        spaces = akg.lower(s,
                           op_var,
                           name=kernel_name,
                           attrs=attrs,
                           polyhedral=polyhedral,
                           tuning=tuning,
                           target=target)
        if set_dim_key == "":
            set_dim_key = str(args)
        return spaces, set_dim_key
Example #27
def reduce_min_ad_optimized_manual_schedule(input_shape,
                                            dtype,
                                            axis,
                                            keepdims,
                                            polyhedral=True,
                                            attrs=None):
    def get_shape(pld):
        return [d.value for d in pld.shape]

    data = akg.tvm.placeholder(input_shape, dtype, name="input_data")

    # Only works for the last axis and 2-D input; needs extending to multiple dimensions and axes.
    def custom_reduce_min_fdiff(out, inputs, grad, ad_attrs, new_pld_array):
        data = inputs[0]
        shape = get_shape(data)
        if len(get_shape(data)) == 2:
            # add an extra stage to avoid alignment problem
            min_input = akg.tvm.compute(data.shape,
                                        lambda *i: data(*i),
                                        name="min_input")
            min_ = akg.lang.cce.reduce_min(min_input, axis=-1, keepdims=True)
            min_broadcast = akg.lang.cce.broadcast(min_, shape)
            if dtype != "float16":
                data = cast(data, "float16")
            return [
                akg.tvm.compute(shape,
                                lambda i, j: akg.tvm.expr.Select(
                                    data[i, j] == min_broadcast[i, j], grad[i],
                                    akg.tvm.const(0, dtype="float16")),
                                name="reduce_min_ad2")
            ]

    L = reduce_min.reduce_min(data, axis)
    head = akg.tvm.placeholder(L.shape, name="head", dtype=L.dtype)
    head_cast = cast(head, "float16")

    [dL_ddata
     ] = akg.differentiate(L, [data],
                           head_cast,
                           None,
                           None,
                           override={L: ([data], custom_reduce_min_fdiff)})

    s = akg.tvm.create_schedule([dL_ddata.op])

    head_ub = s.cache_read(head, "local.UB", [head_cast])
    if dtype == "float16":
        data_ub = s.cache_read(data, "local.UB", [dL_ddata])
    else:
        data_ub = s.cache_read(data, "local.UB",
                               [dL_ddata.op.input_tensors[0]])
        min_input_ub = s.cache_read(
            dL_ddata.op.input_tensors[1].op.input_tensors[0].op.
            input_tensors[0].op.input_tensors[0].op.input_tensors[0],
            "local.UB", [
                dL_ddata.op.input_tensors[1].op.input_tensors[0].op.
                input_tensors[0].op.input_tensors[0]
            ])
        s[dL_ddata.op.input_tensors[1].op.input_tensors[0].op.input_tensors[0].
          op.input_tensors[0]].set_scope("local.UB")

    dL_ddata_ub = s.cache_write(dL_ddata, "local.UB")

    # tiling
    split_axis = {}
    for i in range(len(attrs['tile'])):
        split_axis["axis" + str(i)] = s[dL_ddata].split(
            dL_ddata.op.axis[i], attrs["tile"][i])

    split_axis_sorted = sorted(split_axis.items())

    if dtype == "float16":
        s[data_ub].compute_at(s[dL_ddata], split_axis_sorted[-1][1][0])
    else:
        s[data_ub].compute_at(s[dL_ddata], split_axis_sorted[-1][1][0])
        s[dL_ddata.op.input_tensors[0]].compute_at(s[dL_ddata],
                                                   split_axis_sorted[-1][1][0])
        s[dL_ddata.op.input_tensors[0]].set_scope("local.UB")
        s[min_input_ub].compute_at(s[dL_ddata], split_axis_sorted[0][1][1])

    s[head_ub].compute_at(s[dL_ddata], split_axis_sorted[-1][1][0])
    s[head_cast].compute_at(s[dL_ddata], split_axis_sorted[-1][1][0])
    s[head_cast].set_scope("local.UB")
    s[dL_ddata.op.input_tensors[1]].compute_at(s[dL_ddata],
                                               split_axis_sorted[-1][1][0])
    s[dL_ddata.op.input_tensors[1]].set_scope("local.UB")
    s[dL_ddata.op.input_tensors[1].op.input_tensors[0]].compute_at(
        s[dL_ddata], split_axis_sorted[0][1][1])
    s[dL_ddata.op.input_tensors[1].op.input_tensors[0]].set_scope("local.UB")
    s[dL_ddata.op.input_tensors[1].op.input_tensors[0].op.
      input_tensors[0]].compute_at(s[dL_ddata], split_axis_sorted[0][1][1])
    s[dL_ddata.op.input_tensors[1].op.input_tensors[0].op.
      input_tensors[0]].set_scope("local.UB")

    # L is not being used for computation
    # s[L].compute_at(s[dL_ddata], split_axis_sorted[-1][1][0])
    # s[L].set_scope("local.UB")

    s[dL_ddata_ub].compute_at(s[dL_ddata], split_axis_sorted[-1][1][0])

    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s, [data, head, dL_ddata],
                        "cce",
                        name="reduce_min_ad_manual_schedule",
                        attrs=attrs,
                        polyhedral=polyhedral)
        source_code = mod.imported_modules[0].get_source()
        kernel_name = "reduce_min_ad_manual_schedule"
        utils.create_code(kernel_name, './', source_code)
    return mod
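
The custom differentiation rule above routes the incoming gradient only to positions holding the row minimum. A NumPy reference for the 2-D, last-axis case it supports:

import numpy as np

def reduce_min_grad_expect(data, head):
    # head holds one value per row; gradient flows to every element equal to the row minimum
    row_min = data.min(axis=-1, keepdims=True)
    return np.where(data == row_min, head.reshape(-1, 1), 0).astype(data.dtype)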
Example #28
def conv_02(fmap_shape,
            filter_shape,
            pad_,
            stride_,
            dilation_,
            tile_hh=0,
            tile_coco=0,
            tile_mm=0,
            tile_kk=0,
            tile_nn=0,
            bypass_l1=False,
            use_bias=False,
            block_size=16,
            conv_dtype='float16'):

    # input shape (NCHW -> NC1HWC0)
    in_n, in_c, in_h, in_w = fmap_shape
    in_c = (in_c + block_size - 1) // block_size * block_size
    # kernel shape (NCHW -> NC1HWC0 -> Fractal)
    k_n, k_c, k_h, k_w = filter_shape
    k_c = (k_c + block_size - 1) // block_size * block_size
    k_n = (k_n + block_size - 1) // block_size * block_size

    input_shape_nc1hwc0 = (in_n, in_c // block_size, in_h, in_w, block_size)
    in_n, _, in_h, in_w, _ = input_shape_nc1hwc0

    kernel_shape_nc1hwc0 = (k_n, k_c // block_size, k_h, k_w, block_size)
    k_n, _, k_h, k_w, _ = kernel_shape_nc1hwc0
    kernel_shape_fractal = (k_c // block_size * k_h * k_w, k_n // block_size,
                            block_size, block_size)

    # A placeholder (NC1HWCO)
    A = akg.tvm.placeholder(input_shape_nc1hwc0,
                            dtype=conv_dtype,
                            name="input0")
    # B_placeholder (fractal)
    B = akg.tvm.placeholder(kernel_shape_fractal,
                            dtype=conv_dtype,
                            name="input1")

    if use_bias:
        bias_shape_nc1hwc0 = (1, k_n // block_size, 1, 1, block_size)
        bias_name = "input2"
        bias_value = akg.tvm.placeholder(bias_shape_nc1hwc0,
                                         dtype=conv_dtype,
                                         name=bias_name)
    else:
        bias_name = 'None'
        bias_value = None

    conv_forward = conv_compute_forward(fmap_shape, filter_shape, pad_,
                                        stride_, dilation_, A, B, bias_value,
                                        tile_hh, tile_coco, tile_mm, tile_kk,
                                        tile_nn, bypass_l1, use_bias,
                                        block_size, conv_dtype)

    k_hw = k_h * k_w
    const_shift = k_hw - 1

    # B in Fractal format; result in Fractal format
    def flip_weight(B, k_c, k_hw, const_shift):
        out_shape = (B.shape[1].value * k_hw, k_c // block_size, block_size,
                     block_size)
        B_flip = akg.tvm.compute(
            out_shape,
            lambda i0, i1, i2, i3: B[i1 * k_hw + const_shift - truncmod(
                i0, k_hw),
                                     floordiv(i0, k_hw), i3, i2],
            name=B.name + "_flipped")
        return B_flip

    # H in 5D format; result in 5D format
    def strided_head(H, s_h, s_w):
        n, c1, h, w, c0 = H.shape
        out_shape = (n, c1, (h - 1) * s_h + 1, (w - 1) * s_w + 1, c0)
        H_strided = akg.tvm.compute(
            out_shape,
            lambda i0, i1, i2, i3, i4: akg.tvm.expr.Select(
                akg.tvm.any(truncmod(i2, s_h) != 0,
                            truncmod(i3, s_w) != 0),
                akg.tvm.const(0.0, dtype="float16"), H[i0, i1,
                                                       floordiv(i2, s_h),
                                                       floordiv(i3, s_w), i4]),
            name=H.name + "_strided")

        return H_strided

    # A in 5D format; result in 5D format
    def transpose_data(A):
        out_shape = (A.shape[1].value * block_size,
                     A.shape[0].value // block_size, A.shape[2].value,
                     A.shape[3].value, block_size)

        A_transpose = akg.tvm.compute(
            out_shape,
            lambda j0, j1, j2, j3, j4: A[j1 * block_size + j4,
                                         floordiv(j0, block_size), j2, j3,
                                         truncmod(j0, block_size)],
            name=A.name + "_transposed")
        return A_transpose

    # Head is in 5D format; result in Fractal format
    def transpose_convert_head(Head):
        out_shape = ((Head.shape[0].value // block_size) *
                     Head.shape[2].value * Head.shape[3].value,
                     Head.shape[1].value, block_size, block_size)
        tmp_6D_shape = (Head.shape[0].value // block_size, block_size,
                        Head.shape[1].value, Head.shape[2].value,
                        Head.shape[3].value, block_size)
        Head_6D = akg.topi.reshape(Head, tmp_6D_shape)
        Head_6D_transpose = akg.topi.transpose(Head_6D, (0, 3, 4, 2, 5, 1))
        Head_transpose_convert = akg.topi.reshape(Head_6D_transpose, out_shape)
        return Head_transpose_convert

    HEAD = akg.tvm.placeholder(conv_forward.shape,
                               name="Head",
                               dtype='float16')
    Head_transposed_NCHW = (HEAD.shape[1].value * HEAD.shape[4].value,
                            HEAD.shape[0].value, HEAD.shape[2].value,
                            HEAD.shape[3].value)
    s_h, s_w = stride_
    Head_strided_NCHW = (HEAD.shape[0].value,
                         HEAD.shape[1].value * HEAD.shape[4].value,
                         (HEAD.shape[2].value - 1) * s_h + 1,
                         (HEAD.shape[3].value - 1) * s_w + 1)

    A_transposed_NCHW = (in_c, in_n, in_h, in_w)
    K_flip_rot_NCHW = (k_c, k_n, k_h, k_w)

    Head_transposed_converted = transpose_convert_head(HEAD)
    pld_Head_transposed_converted = akg.tvm.placeholder(
        Head_transposed_converted.shape,
        name="Head_trans_fractal",
        dtype=conv_dtype)
    A_transposed = transpose_data(A)
    pld_A_transposed = akg.tvm.placeholder(A_transposed.shape,
                                           name="A_trans",
                                           dtype=conv_dtype)

    info = dim.Dim()
    info.setdim(index=0, axis=0, tilel1=1, tilel0=1)
    info.setdim(index=0, axis=1, tilel1=1, tilel0=1)
    info.setdim(index=0, axis=2, tilel1=1, tilel0=1)
    info.setdim(index=0, axis=3, tilel1=1, tilel0=1)

    B_flip = flip_weight(B, k_c, k_hw, const_shift)
    pld_B_flipped = akg.tvm.placeholder(B_flip.shape,
                                        name="B_flip",
                                        dtype=conv_dtype)

    s_flipped = akg.tvm.create_schedule(B_flip.op)
    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        mod_weight_flipped = akg.build(s_flipped, [B, B_flip],
                                       "cce",
                                       name=B.name + "_flipped",
                                       attrs={"dim": str(info)},
                                       polyhedral=True)

    s_transposed_converted = akg.tvm.create_schedule(
        Head_transposed_converted.op)

    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        mod_head_transposed_converted = akg.build(
            s_transposed_converted, [HEAD, Head_transposed_converted],
            "cce",
            name="H_trans_converted",
            attrs={"dim": str(info)},
            polyhedral=True)

    Head_strided = strided_head(HEAD, s_h, s_w)
    pld_Head_strided = akg.tvm.placeholder(Head_strided.shape,
                                           name="Head_trans_5D",
                                           dtype=conv_dtype)

    s_strided = akg.tvm.create_schedule(Head_strided.op)
    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        mod_head_strided = akg.build(s_strided, [HEAD, Head_strided],
                                     "cce",
                                     name="H_strided",
                                     attrs={"dim": str(info)},
                                     polyhedral=True)

    s_transposed = akg.tvm.create_schedule(A_transposed.op)

    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        mod_transposed = akg.build(s_transposed, [A, A_transposed],
                                   "cce",
                                   name="A_transposed",
                                   attrs={"dim": str(info)},
                                   polyhedral=True)

    ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
    jacs = list(
        akg.differentiate(conv_forward, [A], HEAD, ad_attrs,
                          [pld_Head_strided, pld_B_flipped, None]))
    info = set_dims(Head_strided_NCHW, (k_c, k_n, k_h, k_w),
                    (k_h - 1, k_w - 1), (1, 1), (1, 1), tile_hh, tile_coco,
                    tile_mm, tile_kk, tile_nn, block_size)

    sjac = akg.tvm.create_schedule([jacs[0].op])
    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        mod_AD_data = akg.build(sjac,
                                [pld_Head_strided, pld_B_flipped, jacs[0]],
                                "cce",
                                name="conv_AD_data",
                                attrs={"dim": str(info)},
                                polyhedral=True)

    conv_data = conv_compute_forward(Head_strided_NCHW, K_flip_rot_NCHW,
                                     (k_h - 1, k_h - 1, k_w - 1, k_w - 1),
                                     (1, 1), (1, 1), pld_Head_strided,
                                     pld_B_flipped, None, tile_hh, tile_coco,
                                     tile_mm, tile_kk, tile_nn, bypass_l1,
                                     use_bias, block_size, conv_dtype)

    info = set_dims(Head_strided_NCHW, (k_c, k_n, k_h, k_w),
                    (k_h - 1, k_w - 1), (1, 1), (1, 1), tile_hh, tile_coco,
                    tile_mm, tile_kk, tile_nn, block_size)

    s_data = akg.tvm.create_schedule(conv_data.op)

    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        _ = akg.build(s_data, [pld_Head_strided, pld_B_flipped, conv_data],
                      "cce",
                      name="conv_data",
                      attrs={"dim": str(info)},
                      polyhedral=True)

    ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
    jacs = list(
        akg.differentiate(
            conv_forward, [B], HEAD, ad_attrs,
            [pld_A_transposed, pld_Head_transposed_converted, None]))
    info = set_dims(A_transposed_NCHW, Head_transposed_NCHW, (0, 0), (1, 1),
                    (s_h, s_w), tile_hh, tile_coco, tile_mm, tile_kk, tile_nn,
                    block_size)

    sjac = akg.tvm.create_schedule([jacs[0].op])
    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        mod_AD_weight = akg.build(
            sjac, [pld_A_transposed, pld_Head_transposed_converted, jacs[0]],
            "cce",
            name="conv_AD_weight",
            attrs={"dim": str(info)},
            polyhedral=True)

    conv_weight = conv_compute_forward(
        A_transposed_NCHW, Head_transposed_NCHW, (0, 0, 0, 0), (1, 1),
        (s_h, s_w), pld_A_transposed, pld_Head_transposed_converted, None,
        tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, bypass_l1, use_bias,
        block_size, conv_dtype)

    info = set_dims(A_transposed_NCHW, Head_transposed_NCHW, (0, 0), (1, 1),
                    (s_h, s_w), tile_hh, tile_coco, tile_mm, tile_kk, tile_nn,
                    block_size)

    s_weight = akg.tvm.create_schedule(conv_weight.op)

    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        akg.build(
            s_weight,
            [pld_A_transposed, pld_Head_transposed_converted, conv_weight],
            "cce",
            name="conv_weight",
            attrs={"dim": str(info)},
            polyhedral=True)

    return mod_AD_data, mod_AD_weight, mod_transposed, mod_head_transposed_converted, mod_head_strided, mod_weight_flipped
Example #29
def conv_01(fmap_shape,
            filter_shape,
            pad_,
            stride_,
            dilation_,
            tile_hh=0,
            tile_coco=0,
            tile_mm=0,
            tile_kk=0,
            tile_nn=0,
            use_bias=False,
            block_size=16,
            conv_dtype='float16'):

    # input shape (NCHW -> NC1HWC0)
    in_n, in_c, in_h, in_w = fmap_shape
    in_c = (in_c + block_size - 1) // block_size * block_size
    # kernel shape (NCHW -> NC1HWC0 -> Fractal)
    k_n, k_c, k_h, k_w = filter_shape
    k_c = (k_c + block_size - 1) // block_size * block_size
    k_n = (k_n + block_size - 1) // block_size * block_size

    input_shape_nc1hwc0 = (in_n, in_c // block_size, in_h, in_w, block_size)

    kernel_shape_nc1hwc0 = (k_n, k_c // block_size, k_h, k_w, block_size)
    k_n, _, k_h, k_w, _ = kernel_shape_nc1hwc0
    kernel_shape_fractal = (k_c // block_size * k_h * k_w, k_n // block_size,
                            block_size, block_size)

    # A placeholder (NC1HWCO)
    A = akg.tvm.placeholder(input_shape_nc1hwc0,
                            dtype=conv_dtype,
                            name="input0")
    # B_placeholder (fractal)
    B = akg.tvm.placeholder(kernel_shape_fractal,
                            dtype=conv_dtype,
                            name="input1")
    data = [A, B]
    if use_bias:
        bias_shape_nc1hwc0 = (1, k_n // block_size, 1, 1, block_size)
        bias_name = "input2"
        bias_value = akg.tvm.placeholder(bias_shape_nc1hwc0,
                                         dtype=conv_dtype,
                                         name=bias_name)
        data.append(bias_value)
    else:
        bias_name = 'None'
        bias_value = None

    conv, _ = Conv(data, fmap_shape, filter_shape, pad_, stride_, dilation_,
                   use_bias)

    kernel_name = 'conv_ad'

    k_n, k_c, k_h, k_w = filter_shape
    k_c = (k_c + block_size - 1) // block_size * block_size
    k_n = (k_n + block_size - 1) // block_size * block_size
    k_hw = k_h * k_w
    const_shift = k_hw - 1

    # B in Fractal format; result in Fractal format
    def flip_weight(B, k_c, k_hw, const_shift):
        out_shape = (B.shape[1].value * k_hw, k_c // block_size, block_size,
                     block_size)
        B_flip = akg.tvm.compute(
            out_shape,
            lambda i0, i1, i2, i3: B[i1 * k_hw + const_shift - truncmod(
                i0, k_hw),
                                     floordiv(i0, k_hw), i3, i2],
            name=B.name + "_flipped")
        return B_flip

    def strided_head(H, s_h, s_w):
        n, c1, h, w, c0 = H.shape
        out_shape = (n, c1, (h - 1) * s_h + 1, (w - 1) * s_w + 1, c0)
        H_strided = akg.tvm.compute(
            out_shape,
            lambda i0, i1, i2, i3, i4: akg.tvm.expr.Select(
                akg.tvm.any(truncmod(i2, s_h) != 0,
                            truncmod(i3, s_w) != 0),
                akg.tvm.const(0.0, dtype="float16"), H[i0, i1,
                                                       floordiv(i2, s_h),
                                                       floordiv(i3, s_w), i4]),
            name=H.name + "_strided")
        return H_strided
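    # strided_head dilates the output gradient for stride > 1: (s_h - 1) rows and
    # (s_w - 1) columns of zeros are inserted between neighbouring elements, so a
    # head of height h becomes (h - 1) * s_h + 1. Example: h = 4, s_h = 2 -> 7 rows,
    # with the original values at even row/column positions and zeros elsewhere.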

    B_flip = flip_weight(B, k_c, k_hw, const_shift)

    pld_B_flip = akg.tvm.placeholder(B_flip.shape,
                                     name="inp1_flipped",
                                     dtype='float16')
    HEAD = akg.tvm.placeholder(conv.shape, name="Head", dtype='float16')

    HEAD_n, HEAD_c1, HEAD_h, HEAD_w, HEAD_c0 = HEAD.shape
    info = set_dims((HEAD_n.value, HEAD_c1.value * HEAD_c0.value, HEAD_h.value,
                     HEAD_w.value), (k_c, k_n, k_h, k_w), (2, 2), (1, 1),
                    (1, 1), tile_hh, tile_coco, tile_mm, tile_kk, tile_nn,
                    block_size)

    s_h, s_w = stride_
    if (s_h == 1) and (s_w == 1):
        ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
        jacs = list(
            akg.differentiate(conv, [A], HEAD, ad_attrs,
                              [HEAD, pld_B_flip, None]))
        sjac = akg.tvm.create_schedule([jacs[0].op])
        op_vars = [HEAD, pld_B_flip, jacs[0]]
        info = set_dims((HEAD_n.value, HEAD_c1.value * HEAD_c0.value,
                         HEAD_h.value, HEAD_w.value), (k_c, k_n, k_h, k_w),
                        (k_h - 1, k_w - 1), (1, 1), (1, 1), tile_hh, tile_coco,
                        tile_mm, tile_kk, tile_nn, block_size)
    else:
        Head_strided = strided_head(HEAD, s_h, s_w)
        pld_Head_strided = akg.tvm.placeholder(Head_strided.shape,
                                               name="head_strided",
                                               dtype='float16')

        ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
        jacs = list(
            akg.differentiate(conv, [A], HEAD, ad_attrs,
                              [pld_Head_strided, pld_B_flip, None]))
        sjac = akg.tvm.create_schedule([jacs[0].op])
        op_vars = [pld_Head_strided, pld_B_flip, jacs[0]]
        h_n, h_c1, h_h, h_w, h_c0 = pld_Head_strided.shape
        info = set_dims(
            (h_n.value, h_c1.value * h_c0.value, h_h.value, h_w.value),
            (k_c, k_n, k_h, k_w), (k_h - 1, k_w - 1), (1, 1), (1, 1), tile_hh,
            tile_coco, tile_mm, tile_kk, tile_nn, block_size)

    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        mod_backward = akg.build(sjac,
                                 op_vars,
                                 "cce",
                                 name=kernel_name,
                                 attrs={"dim": str(info)},
                                 polyhedral=True)
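    # mod_backward is the data-gradient kernel: akg.differentiate(conv, [A], HEAD, ...)
    # propagates HEAD back to the input feature map A, implemented as a convolution of
    # the (zero-dilated, when stride > 1) head with the flipped weights.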

    def transpose_data(A):
        out_shape = (A.shape[1] * block_size, truncdiv(A.shape[0], block_size),
                     A.shape[2], A.shape[3], block_size)
        A_transpose = akg.tvm.compute(
            out_shape,
            lambda j0, j1, j2, j3, j4: A[j1 * block_size + j4,
                                         truncdiv(j0, block_size), j2, j3,
                                         truncmod(j0, block_size)],
            name=A.name + "_transposed")
        return A_transpose
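    # transpose_data swaps the batch and channel axes of a 5D NC1HWC0 tensor:
    # (N, C1, H, W, C0) -> (C1 * C0, N // block_size, H, W, block_size). The transposed
    # input is one of the two operands of the weight-gradient convolution built below.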

    # Head is in 5D format
    # Output is in Fractal format
    def transpose_convert_head(Head):
        out_shape = ((floordiv(Head.shape[0].value, block_size)) *
                     Head.shape[2].value * Head.shape[3].value,
                     Head.shape[1].value, block_size, block_size)
        tmp_6D_shape = (floordiv(Head.shape[0].value,
                                 block_size), block_size, Head.shape[1].value,
                        Head.shape[2].value, Head.shape[3].value, block_size)

        Head_6D = akg.topi.reshape(Head, tmp_6D_shape)
        # Transpose from (N//block_size_N, block_size_N, C//block_size_C, H, W, block_size_C)
        #           to   (N//block_size_N, H, W, C//block_size_C, block_size_C, block_size_N)
        Head_6D_transpose = akg.topi.transpose(Head_6D, (0, 3, 4, 2, 5, 1))
        Head_transpose_convert = akg.topi.reshape(Head_6D_transpose, out_shape)
        return Head_transpose_convert
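    # transpose_convert_head reinterprets the (possibly dilated) head as the filter
    # operand of the weight-gradient convolution: the 5D head is reshaped to 6D, the
    # batch and channel block axes are permuted, and the result is reshaped into the
    # fractal layout (N // 16 * H * W, C1, 16, 16).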

    X_transposed = transpose_data(A)
    pld_X_transposed = akg.tvm.placeholder(X_transposed.shape,
                                           name="inp0_transposed",
                                           dtype='float16')

    if (s_h > 1) or (s_w > 1):
        Head_transposed_converted = strided_head(HEAD, s_h, s_w)
    else:
        Head_transposed_converted = HEAD

    strided_head_n, strided_head_c1, strided_head_h, strided_head_w, strided_head_c0 = Head_transposed_converted.shape
    Head_transposed_converted = transpose_convert_head(
        Head_transposed_converted)

    _ = akg.tvm.create_schedule(Head_transposed_converted.op)

    pld_Head_transposed_converted = akg.tvm.placeholder(
        Head_transposed_converted.shape,
        name="head_transposed",
        dtype='float16')
    ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
    jacs = list(
        akg.differentiate(
            conv, [B], HEAD, ad_attrs,
            [pld_X_transposed, pld_Head_transposed_converted, None]))
    sjac = akg.tvm.create_schedule([jacs[0].op])

    op_vars = [HEAD, pld_X_transposed, pld_Head_transposed_converted, jacs[0]]
    in_n, in_c1, in_h, in_w, in_c0 = A.shape
    info = set_dims(
        (in_c1.value * in_c0.value, in_n.value, in_h.value, in_w.value),
        (strided_head_c1.value * strided_head_c0.value, strided_head_n.value,
         strided_head_h.value, strided_head_w.value), (0, 0), (1, 1), (1, 1),
        tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)

    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        mod_backward2 = akg.build(sjac,
                                  op_vars,
                                  "cce",
                                  name="conv_backward_weight",
                                  attrs={"dim": str(info)},
                                  polyhedral=True)
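    # mod_backward2 ("conv_backward_weight") is the weight-gradient kernel:
    # akg.differentiate(conv, [B], HEAD, ...) with the transposed input and the
    # transposed/converted head supplied as the two convolution operands.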

    return mod_backward, mod_backward2
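
# Minimal usage sketch (shapes, the padding format and the helper name are assumptions,
# not part of the original example); actually building the kernels requires an
# Ascend/CCE toolchain to be available.
def _conv_01_demo():
    fmap_shape = (1, 32, 28, 28)     # NCHW
    filter_shape = (64, 32, 3, 3)    # NCHW
    pad_ = (0, 0, 0, 0)              # (top, bottom, left, right), assumed format
    stride_ = (1, 1)
    dilation_ = (1, 1)
    mod_dx, mod_dw = conv_01(fmap_shape, filter_shape, pad_, stride_, dilation_)
    return mod_dx, mod_dw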
Example No. 30
0
def create_gpu_mod(sch_tmpl, s, op_func, op_var, shape_var, kernel_name, attrs,
                   polyhedral, binds, dump_ir, dump_code, tuning):
    """
    Build and return the module for a GPU op.

    Args:
        sch_tmpl (dict): schedule template of the op (op_name, target, output tensor and schedule function).
        s (schedule): schedule of the op, used when no schedule template is given.
        op_func (function returning an op or (op, [op_vars])): the op build function.
        op_var (list): the akg.tvm.tensor inputs and outputs of the op.
        shape_var (list): shapes of the inputs and extra attributes for the op.
        kernel_name (str): name of the op.
        attrs (dict): tiling parameters and other build attributes.
        polyhedral (bool): True by default.
        binds (dict): tensor-to-buffer bindings forwarded to akg.build.
        dump_ir (bool): True by default.
        dump_code (bool): False by default.
        tuning (bool): False by default.

    Return:
        the built module.
    """

    if sch_tmpl is not None or (attrs
                                and attrs.get("target", "cce") == "cuda"):
        if kernel_name == "":
            kernel_name = op_func.__name__ if sch_tmpl is None else sch_tmpl[
                'op_name']

    target = CUDA

    if sch_tmpl is not None:
        if sch_tmpl['target'] != CUDA:
            raise ValueError(
                "Only support cuda as target when using schedule template.")
        global kc_air_mode
        kc_air_mode = "CUDA"
        with akg.tvm.target.cuda() as target:
            if not tuning:
                s = sch_tmpl['schedule'](sch_tmpl['output'])
                with akg.tvm.build_config(dump_pass_ir=dump_ir):
                    mod = akg.build(s,
                                    op_var,
                                    "cuda",
                                    shape_var,
                                    name=kernel_name,
                                    attrs=attrs,
                                    polyhedral=False,
                                    binds=binds)
            else:

                @autotvm.template
                def _autotune_template():
                    s = sch_tmpl['schedule'](sch_tmpl['output'])
                    return (s, op_var)

                # create autotune task
                task = autotvm.task.create(_autotune_template,
                                           args=list(),
                                           target='cuda')

                print("task config: ", task.config_space)

                # set measure_option
                measure_option = autotvm.measure_option(
                    builder=autotvm.LocalBuilder(),
                    runner=autotvm.LocalRunner(repeat=5,
                                               min_repeat_ms=150,
                                               timeout=4))

                # Begin tuning, log records to file `kernel_name.log`
                tuner = autotvm.tuner.RandomTuner(task)
                if not os.path.exists(kernel_name + '.log'):
                    tuner.tune(n_trial=len(task.config_space),
                               measure_option=measure_option,
                               callbacks=[
                                   autotvm.callback.log_to_file(kernel_name +
                                                                '.log')
                               ])

                # query best config
                dispatch_context = autotvm.apply_history_best(kernel_name +
                                                              '.log')
                best_config = dispatch_context.query(task.target,
                                                     task.workload)
                print("\nBest config is:")
                print(best_config)

                # apply best config
                with autotvm.apply_history_best(kernel_name + '.log'):
                    s, op_var = _autotune_template()
                    mod = akg.build(s,
                                    op_var,
                                    "cuda",
                                    shape_var,
                                    name=kernel_name,
                                    attrs=attrs,
                                    polyhedral=False,
                                    binds=binds)
    else:
        with akg.build_config(dump_pass_ir=dump_ir):
            mod = akg.build(s,
                            op_var,
                            target,
                            shape_var,
                            name=kernel_name,
                            attrs=attrs,
                            polyhedral=polyhedral,
                            binds=binds)
    if dump_code:
        source_code = mod.imported_modules[0].get_source()
        create_code(kernel_name, "./", source_code, CUDA)
    return mod
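
# Minimal usage sketch (tensor shapes, kernel name and attrs are assumptions, not part
# of the original): build a simple elementwise add for CUDA through the non-template
# path; a CUDA-capable environment is required.
def _create_gpu_mod_demo():
    import akg
    a = akg.tvm.placeholder((1024,), name="a", dtype="float16")
    b = akg.tvm.placeholder((1024,), name="b", dtype="float16")
    c = akg.tvm.compute((1024,), lambda i: a[i] + b[i], name="c")
    s = akg.tvm.create_schedule(c.op)
    return create_gpu_mod(sch_tmpl=None, s=s, op_func=None, op_var=[a, b, c],
                          shape_var=[], kernel_name="vector_add",
                          attrs={"target": "cuda"}, polyhedral=True, binds=None,
                          dump_ir=False, dump_code=False, tuning=False)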