Example #1
def add_b_conv(fmap_shape, filter_shape, pad_, stride_, dilation_,
               tile_hh=0, tile_coco=0, tile_mm=0, tile_kk=0, tile_nn=0, bypass_l1=False,
               use_bias=False, block_size=16, conv_dtype='float16'):
    conv, a_value, b_value, bias_value, kernel_name, dim_info = add_b_conv_compute(fmap_shape, filter_shape, pad_, stride_, dilation_,
                                                                                   tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, bypass_l1,
                                                                                   use_bias, block_size, conv_dtype)
    # schedule
    s = akg.tvm.create_schedule(conv.op)
    print(conv, a_value, b_value, bias_value)

    attrs = {}
    attrs["pragma_reschedule"] = True
    attrs["pragma_rmselfdep"] = False
    attrs['dim'] = dim_info
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):

        if use_bias:
            mod = akg.build(s, [a_value, b_value, bias_value, conv], "cce", name=kernel_name, attrs=attrs, polyhedral=True)
        else:
            mod = akg.build(s, [a_value, b_value, conv], "cce", name=kernel_name, attrs=attrs, polyhedral=True)
    source_code = mod.imported_modules[0].get_source()
    cce_path = '.'
    utils.create_code(kernel_name, cce_path, source_code)

    return mod
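A hypothetical invocation of add_b_conv (a sketch only: the shapes and flags below are illustrative, and pad_/stride_/dilation_ are assumed to follow the (top, bottom, left, right) and (h, w) conventions used by the other conv examples here):

mod = add_b_conv(fmap_shape=(1, 64, 56, 56),
                 filter_shape=(64, 64, 3, 3),
                 pad_=(1, 1, 1, 1),
                 stride_=(1, 1),
                 dilation_=(1, 1),
                 use_bias=False)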
Example #2
def div_mod_issue(data_shape, weight_shape, case_number):

    if (case_number == 0):
        A = akg.tvm.placeholder(data_shape, dtype='float16', name='input0')
        divisor = 2
        stage1 = akg.tvm.compute(
            data_shape,
            lambda n, c, h, w: A[n, c / divisor, h, w] + 1,
            name="stage1")
        op_vars = [A, stage1]
        s = akg.tvm.create_schedule([stage1.op])
        akg.lower(s, op_vars, simple_mode=True, polyhedral=True)
        with akg.build_config(add_lower_pass=cce.debug_mode(0),
                              dump_pass_ir=True):
            mod = akg.build(s, op_vars, "cce", name="test1", polyhedral=True)
        return mod
    else:
        A = akg.tvm.placeholder(data_shape, dtype='float16', name='input0')
        B = akg.tvm.placeholder(weight_shape, dtype='float16', name='input1')

        divisor = 3
        stage1 = akg.tvm.compute(
            data_shape,
            lambda n, c, h, w: A[n, c / divisor, h, w] + 1,
            name="stage1")
        stage2 = akg.tvm.compute(
            weight_shape,
            lambda n, c, h, w: stage1[0, c, 0, 0] + B[n, c, h, w],
            name="stage2")
        op_vars = [A, B, stage2]

        s = akg.tvm.create_schedule([stage2.op])
        akg.lower(s, op_vars, simple_mode=True, polyhedral=True)

        with akg.build_config(add_lower_pass=cce.debug_mode(0),
                              dump_pass_ir=True):
            mod_stage2 = akg.build(s,
                                   op_vars,
                                   "cce",
                                   name="test2",
                                   polyhedral=True)
        return mod_stage2
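A sketch of how the two cases might be driven (shapes are illustrative; case 1 requires weight_shape[1] <= data_shape[1] because stage2 reads stage1 along c, and case 0 ignores weight_shape entirely):

mod0 = div_mod_issue((1, 16, 8, 8), None, 0)
mod1 = div_mod_issue((1, 16, 8, 8), (4, 16, 8, 8), 1)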
Example #3
def range_run(start, limit, delta, dtype, attrs):
    t_range = tvm_range.range_value(start, limit, delta, dtype)
    # Create module
    sch = akg.tvm.create_schedule(t_range.op)
    kernel_name = "range"
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod = akg.build(sch, [t_range], "cce", name=kernel_name, attrs=attrs, polyhedral=True)
        print(mod.imported_modules[0].get_source())
    # Generate data for testing the op
    expect = np.asarray(list(range(start, limit, delta)))

    # (limit - start) / delta is float division in Python 3 and cannot size an
    # array; take the buffer shape from the expected result instead.
    output = np.full(expect.shape, np.nan, dtype)
    output = utils.mod_launch(mod, (output, ), expect=expect)

    return tuple(), output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
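Illustrative usage (attrs is forwarded to akg.build; an empty dict is assumed to be acceptable here):

inputs, output, expect, ok = range_run(0, 20, 2, "float16", {})
# expect == [0, 2, 4, ..., 18]; ok is the compare_tensor verdict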
Example #4
def elemwise_sum_manual_schedule(input_shape, polyhedral=False, attrs=None):
    """manually schedule"""
    b = akg.tvm.placeholder(input_shape, dtype='float16', name="b")
    c = akg.tvm.placeholder(input_shape, dtype='float16', name="c")
    a = akg.tvm.compute(input_shape,
                        lambda *indices: b(*indices) + c(*indices))
    ss = akg.tvm.create_schedule([a.op])
    ss.cache_read(b, "local.UB", [a])
    ss.cache_read(c, "local.UB", [a])
    ss.cache_write(a, "local.UB")
    ss[a].set_scope("local.UB")
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod = akg.build(ss, [b, c, a],
                        "cce",
                        name="test_manual_schedule",
                        attrs=attrs,
                        polyhedral=polyhedral)
    return mod
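A minimal sketch of calling the manual schedule (polyhedral defaults to False, so the hand-written UB staging above is what gets lowered):

mod = elemwise_sum_manual_schedule((16, 16))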
Example #5
def conv_relu(fmap_shape,
              filter_shape,
              pad_,
              stride_,
              dilation_,
              tile_hh=0,
              tile_coco=0,
              tile_mm=0,
              tile_kk=0,
              tile_nn=0,
              bypass_l1=False,
              use_bias=False,
              block_size=16,
              conv_dtype='float16'):
    conv, a_value, b_value, bias_value, kernel_name, dim_info = add_a_conv_compute(
        fmap_shape, filter_shape, pad_, stride_, dilation_, tile_hh, tile_coco,
        tile_mm, tile_kk, tile_nn, bypass_l1, use_bias, block_size, conv_dtype)
    # leaky relu
    negative_slope = 0.0
    slope_tmp = akg.tvm.const(negative_slope, dtype=conv_dtype)
    # negative_slope*x
    out = akg.lang.cce.vmuls(conv, slope_tmp)
    # max(x,negative_slope*x)
    out = akg.lang.cce.vmax(out, conv)
    # schedule the fused output (conv followed by relu)
    s = akg.tvm.create_schedule(out.op)
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        if use_bias:
            mod = akg.build(s, [a_value, b_value, bias_value, out],
                            "cce",
                            name=kernel_name,
                            attrs={"dim": dim_info},
                            polyhedral=True)
        else:
            mod = akg.build(s, [a_value, b_value, out],
                            "cce",
                            name=kernel_name,
                            attrs={"dim": dim_info},
                            polyhedral=True)
    return mod
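Hypothetical invocation (shapes and convolution parameters are illustrative, and the pad_/stride_/dilation_ formats are assumed to follow add_a_conv_compute; with negative_slope fixed at 0.0 the vmuls/vmax pair reduces to relu(x) = max(0, x)):

mod = conv_relu(fmap_shape=(1, 16, 32, 32),
                filter_shape=(16, 16, 3, 3),
                pad_=(1, 1, 1, 1),
                stride_=(1, 1),
                dilation_=(1, 1))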
Example #6
def conv_02(fmap_shape, filter_shape, pad_, stride_, dilation_,
            tile_hh=0, tile_coco=0, tile_mm=0, tile_kk=0, tile_nn=0, bypass_l1=False,
            use_bias=False, block_size=16, conv_dtype='float16'):

    # input shape (NCHW -> NC1HWC0)
    in_n, in_c, in_h, in_w = fmap_shape
    in_c = (in_c + block_size - 1) // block_size * block_size
    # kernel shape (NCHW -> NC1HWC0 -> Fractal)
    k_n, k_c, k_h, k_w = filter_shape
    k_c = (k_c + block_size - 1) // block_size * block_size
    k_n = (k_n + block_size - 1) // block_size * block_size

    input_shape_nc1hwc0 = (in_n, in_c // block_size, in_h, in_w, block_size)
    in_n, _, in_h, in_w, _ = input_shape_nc1hwc0

    kernel_shape_nc1hwc0 = (k_n, k_c // block_size, k_h, k_w, block_size)
    k_n, _, k_h, k_w, _ = kernel_shape_nc1hwc0
    kernel_shape_fractal = (k_c // block_size * k_h * k_w, k_n // block_size, block_size, block_size)

    # A placeholder (NC1HWC0)
    A = akg.tvm.placeholder(input_shape_nc1hwc0, dtype=conv_dtype, name="input0")
    # B_placeholder (fractal)
    B = akg.tvm.placeholder(kernel_shape_fractal, dtype=conv_dtype, name="input1")

    if use_bias:
        bias_shape_nc1hwc0 = (1, k_n // block_size, 1, 1, block_size)
        bias_name = "input2"
        bias_value = akg.tvm.placeholder(bias_shape_nc1hwc0, dtype=conv_dtype, name=bias_name)
    else:
        bias_name = 'None'
        bias_value = None

    conv_forward = conv_compute_forward(fmap_shape, filter_shape, pad_, stride_, dilation_, A, B, bias_value,
                                        tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, bypass_l1,
                                        use_bias, block_size, conv_dtype)

    k_hw = k_h * k_w
    const_shift = k_hw - 1

    # B in Fractal format; result in Fractal format
    def flip_weight(B, k_c, k_hw, const_shift):
        out_shape = (B.shape[1].value * k_hw, k_c // block_size, block_size, block_size)
        B_flip = akg.tvm.compute(out_shape,
                                 lambda i0, i1, i2, i3:
                                 B[i1 * k_hw + const_shift - truncmod(i0, k_hw), floordiv(i0, k_hw), i3, i2],
                                 name=B.name + "_flipped")
        return B_flip

    # H in 5D format; result in 5D format
    def strided_head(H, s_h, s_w):
        n, c1, h, w, c0 = H.shape
        out_shape = (n, c1, (h - 1) * s_h + 1, (w - 1) * s_w + 1, c0)
        H_strided = akg.tvm.compute(out_shape,
                                    lambda i0, i1, i2, i3, i4:
                                    akg.tvm.expr.Select(akg.tvm.any(truncmod(i2, s_h) != 0, truncmod(i3, s_w) != 0),
                                                        akg.tvm.const(0.0, dtype="float16"),
                                                        H[i0, i1, floordiv(i2, s_h), floordiv(i3, s_w), i4]),
                                    name=H.name + "_strided")

        return H_strided

    # A in 5D format; result in 5D format
    def transpose_data(A):
        out_shape = (A.shape[1].value * block_size, A.shape[0].value // block_size,
                     A.shape[2].value, A.shape[3].value, block_size)

        A_transpose = akg.tvm.compute(out_shape,
                                      lambda j0, j1, j2, j3, j4:
                                      A[j1 * block_size + j4, floordiv(j0, block_size), j2, j3, truncmod(j0, block_size)],
                                      name=A.name + "_transposed")
        return A_transpose

    # Head is in 5D format; result in Fractal format
    def transpose_convert_head(Head):
        out_shape = ((Head.shape[0].value // block_size) * Head.shape[2].value * Head.shape[3].value,
                     Head.shape[1].value, block_size, block_size)
        tmp_6D_shape = (Head.shape[0].value // block_size, block_size,
                        Head.shape[1].value, Head.shape[2].value, Head.shape[3].value, block_size)
        Head_6D = akg.topi.reshape(Head, tmp_6D_shape)
        Head_6D_transpose = akg.topi.transpose(Head_6D, (0, 3, 4, 2, 5, 1))
        Head_transpose_convert = akg.topi.reshape(Head_6D_transpose, out_shape)
        return Head_transpose_convert

    HEAD = akg.tvm.placeholder(conv_forward.shape, name="Head", dtype='float16')
    Head_transposed_NCHW = (HEAD.shape[1].value * HEAD.shape[4].value, HEAD.shape[0].value,
                            HEAD.shape[2].value, HEAD.shape[3].value)
    s_h, s_w = stride_
    Head_strided_NCHW = (HEAD.shape[0].value, HEAD.shape[1].value * HEAD.shape[4].value,
                         (HEAD.shape[2].value - 1) * s_h + 1, (HEAD.shape[3].value - 1) * s_w + 1)

    A_transposed_NCHW = (in_c, in_n, in_h, in_w)
    K_flip_rot_NCHW = (k_c, k_n, k_h, k_w)

    Head_transposed_converted = transpose_convert_head(HEAD)
    pld_Head_transposed_converted = akg.tvm.placeholder(Head_transposed_converted.shape,
                                                        name="Head_trans_fractal", dtype=conv_dtype)
    A_transposed = transpose_data(A)
    pld_A_transposed = akg.tvm.placeholder(A_transposed.shape, name="A_trans", dtype=conv_dtype)

    info = dim.Dim()
    info.setdim(index=0, axis=0, tilel1=1, tilel0=1)
    info.setdim(index=0, axis=1, tilel1=1, tilel0=1)
    info.setdim(index=0, axis=2, tilel1=1, tilel0=1)
    info.setdim(index=0, axis=3, tilel1=1, tilel0=1)

    B_flip = flip_weight(B, k_c, k_hw, const_shift)
    pld_B_flipped = akg.tvm.placeholder(B_flip.shape, name="B_flip", dtype=conv_dtype)

    s_flipped = akg.tvm.create_schedule(B_flip.op)
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod_weight_flipped = akg.build(s_flipped, [B, B_flip], "cce", name=B.name + "_flipped",
                                       attrs={"dim": str(info)}, polyhedral=True)

    s_transposed_converted = akg.tvm.create_schedule(Head_transposed_converted.op)


    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod_head_transposed_converted = akg.build(s_transposed_converted, [HEAD, Head_transposed_converted],
                                                  "cce", name="H_trans_converted",
                                                  attrs={"dim": str(info)},
                                                  polyhedral=True)

    Head_strided = strided_head(HEAD, s_h, s_w)
    pld_Head_strided = akg.tvm.placeholder(Head_strided.shape, name="Head_trans_5D", dtype=conv_dtype)

    s_strided = akg.tvm.create_schedule(Head_strided.op)
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod_head_strided = akg.build(s_strided, [HEAD, Head_strided],
                                     "cce", name="H_strided", attrs={"dim": str(info)}, polyhedral=True)

    s_transposed = akg.tvm.create_schedule(A_transposed.op)


    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod_transposed = akg.build(s_transposed, [A, A_transposed], "cce",
                                   name="A_transposed", attrs={"dim": str(info)}, polyhedral=True)

    ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
    jacs = list(akg.differentiate(conv_forward, [A], HEAD, ad_attrs, [pld_Head_strided, pld_B_flipped, None]))
    info = set_dims(Head_strided_NCHW, (k_c, k_n, k_h, k_w), (k_h - 1, k_w - 1), (1, 1), (1, 1),
                    tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)

    sjac = akg.tvm.create_schedule([jacs[0].op])
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod_AD_data = akg.build(sjac, [pld_Head_strided, pld_B_flipped, jacs[0]], "cce",
                                name="conv_AD_data", attrs={"dim": str(info)}, polyhedral=True)


    conv_data = conv_compute_forward(Head_strided_NCHW, K_flip_rot_NCHW,
                                     (k_h - 1, k_h - 1, k_w - 1, k_w - 1), (1, 1), (1, 1),
                                     pld_Head_strided, pld_B_flipped, None,
                                     tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, bypass_l1,
                                     use_bias, block_size, conv_dtype)

    info = set_dims(Head_strided_NCHW, (k_c, k_n, k_h, k_w), (k_h - 1, k_w - 1), (1, 1), (1, 1),
                    tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)

    s_data = akg.tvm.create_schedule(conv_data.op)

    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod_data = akg.build(s_data, [pld_Head_strided, pld_B_flipped, conv_data], "cce",
                             name="conv_data", attrs={"dim": str(info)}, polyhedral=True)

    ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
    jacs = list(akg.differentiate(conv_forward, [B], HEAD, ad_attrs, [pld_A_transposed, pld_Head_transposed_converted, None]))
    info = set_dims(A_transposed_NCHW, Head_transposed_NCHW, (0, 0), (1, 1), (s_h, s_w),
                    tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)

    sjac = akg.tvm.create_schedule([jacs[0].op])
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod_AD_weight = akg.build(sjac, [pld_A_transposed, pld_Head_transposed_converted, jacs[0]], "cce",
                                  name="conv_AD_weight", attrs={"dim": str(info)}, polyhedral=True)

    conv_weight = conv_compute_forward(A_transposed_NCHW, Head_transposed_NCHW,
                                       (0, 0, 0, 0), (1, 1), (s_h, s_w),
                                       pld_A_transposed, pld_Head_transposed_converted, None,
                                       tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, bypass_l1,
                                       use_bias, block_size, conv_dtype)

    info = set_dims(A_transposed_NCHW, Head_transposed_NCHW, (0, 0), (1, 1), (s_h, s_w),
                    tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)

    s_weight = akg.tvm.create_schedule(conv_weight.op)

    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod_weight = akg.build(s_weight, [pld_A_transposed, pld_Head_transposed_converted, conv_weight], "cce",
                               name="conv_weight", attrs={"dim": str(info)}, polyhedral=True)

    return mod_AD_data, mod_AD_weight, mod_transposed, mod_head_transposed_converted, mod_head_strided, mod_weight_flipped
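conv_02 moves every tensor through the NC1HWC0 and fractal layouts before building kernels. As a self-contained reference for what the 5-D transform does (a NumPy sketch, assuming C is already padded to a multiple of block_size as in the code above):

import numpy as np

def nchw_to_nc1hwc0(x, block_size=16):
    # (N, C, H, W) -> (N, C//bs, bs, H, W) -> (N, C1, H, W, C0), with C0 == block_size
    n, c, h, w = x.shape
    assert c % block_size == 0
    return x.reshape(n, c // block_size, block_size, h, w).transpose(0, 1, 3, 4, 2)

x = np.arange(2 * 32 * 4 * 4, dtype=np.float16).reshape(2, 32, 4, 4)
print(nchw_to_nc1hwc0(x).shape)  # (2, 2, 4, 4, 16)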
Example #7
def conv_01(fmap_shape, filter_shape, pad_, stride_, dilation_,
            tile_hh=0, tile_coco=0, tile_mm=0, tile_kk=0, tile_nn=0,
            use_bias=False, block_size=16, conv_dtype='float16'):

    # input shape (NCHW -> NC1HWC0)
    in_n, in_c, in_h, in_w = fmap_shape
    in_c = (in_c + block_size - 1) // block_size * block_size
    # kernel shape (NCHW -> NC1HWC0 -> Fractal)
    k_n, k_c, k_h, k_w = filter_shape
    k_c = (k_c + block_size - 1) // block_size * block_size
    k_n = (k_n + block_size - 1) // block_size * block_size

    input_shape_nc1hwc0 = (in_n, in_c // block_size, in_h, in_w, block_size)

    kernel_shape_nc1hwc0 = (k_n, k_c // block_size, k_h, k_w, block_size)
    k_n, _, k_h, k_w, _ = kernel_shape_nc1hwc0
    kernel_shape_fractal = (k_c // block_size * k_h * k_w, k_n // block_size, block_size, block_size)


    # A placeholder (NC1HWC0)
    A = akg.tvm.placeholder(input_shape_nc1hwc0, dtype=conv_dtype, name="input0")
    # B_placeholder (fractal)
    B = akg.tvm.placeholder(kernel_shape_fractal, dtype=conv_dtype, name="input1")
    data = [A, B]
    if use_bias:
        bias_shape_nc1hwc0 = (1, k_n // block_size, 1, 1, block_size)
        bias_name = "input2"
        bias_value = akg.tvm.placeholder(bias_shape_nc1hwc0, dtype=conv_dtype, name=bias_name)
        data.append(bias_value)
    else:
        bias_name = 'None'
        bias_value = None

    conv, _ = conv_origin.conv(data, fmap_shape, filter_shape, pad_, stride_, dilation_, use_bias)

    kernel_name = 'conv_ad'

    k_n, k_c, k_h, k_w = filter_shape
    k_c = (k_c + block_size - 1) // block_size * block_size
    k_n = (k_n + block_size - 1) // block_size * block_size
    k_hw = k_h * k_w
    const_shift = k_hw - 1

    # B in Fractal format; result in Fractal format
    def flip_weight(B, k_c, k_hw, const_shift):
        out_shape = (B.shape[1].value * k_hw, k_c // block_size, block_size, block_size)
        B_flip = akg.tvm.compute(out_shape,
                                 lambda i0, i1, i2, i3: B[i1 * k_hw + const_shift - truncmod(i0, k_hw),
                                                          floordiv(i0, k_hw), i3, i2],
                                 name=B.name + "_flipped")
        return B_flip

    def strided_head(H, s_h, s_w):
        n, c1, h, w, c0 = H.shape
        out_shape = (n, c1, (h - 1) * s_h + 1, (w - 1) * s_w + 1, c0)
        H_strided = akg.tvm.compute(out_shape, lambda i0, i1, i2, i3, i4:
                                    akg.tvm.expr.Select(akg.tvm.any(truncmod(i2, s_h) != 0,
                                                                    truncmod(i3, s_w) != 0),
                                                        akg.tvm.const(0.0, dtype="float16"),
                                                        H[i0, i1, floordiv(i2, s_h), floordiv(i3, s_w), i4]),
                                    name=H.name + "_strided")
        return H_strided

    B_flip = flip_weight(B, k_c, k_hw, const_shift)

    pld_B_flip = akg.tvm.placeholder(B_flip.shape, name="inp1_flipped", dtype='float16')
    HEAD = akg.tvm.placeholder(conv.shape, name="Head", dtype='float16')

    HEAD_n, HEAD_c1, HEAD_h, HEAD_w, HEAD_c0 = HEAD.shape
    info = set_dims((HEAD_n.value, HEAD_c1.value * HEAD_c0.value, HEAD_h.value, HEAD_w.value),
                    (k_c, k_n, k_h, k_w), (2, 2), (1, 1), (1, 1),
                    tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)

    s_h, s_w = stride_
    if (s_h == 1) and (s_w == 1):
        ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
        jacs = list(akg.differentiate(conv, [A], HEAD, ad_attrs, [HEAD, pld_B_flip, None]))
        sjac = akg.tvm.create_schedule([jacs[0].op])
        op_vars = [HEAD, pld_B_flip, jacs[0]]
        info = set_dims((HEAD_n.value, HEAD_c1.value * HEAD_c0.value, HEAD_h.value, HEAD_w.value),
                        (k_c, k_n, k_h, k_w), (k_h - 1, k_w - 1), (1, 1), (1, 1),
                        tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
    else:
        Head_strided = strided_head(HEAD, s_h, s_w)
        pld_Head_strided = akg.tvm.placeholder(Head_strided.shape, name="head_strided", dtype='float16')

        ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
        jacs = list(akg.differentiate(conv, [A], HEAD, ad_attrs, [pld_Head_strided, pld_B_flip, None]))
        sjac = akg.tvm.create_schedule([jacs[0].op])
        op_vars = [pld_Head_strided, pld_B_flip, jacs[0]]
        h_n, h_c1, h_h, h_w, h_c0 = pld_Head_strided.shape
        info = set_dims((h_n.value, h_c1.value * h_c0.value, h_h.value, h_w.value), (k_c, k_n, k_h, k_w),
                        (k_h - 1, k_w - 1), (1, 1), (1, 1), tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)


    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod_backward = akg.build(sjac, op_vars, "cce", name=kernel_name, attrs={"dim": str(info)}, polyhedral=True)


    def transpose_data(A):
        out_shape = (A.shape[1] * block_size, truncdiv(A.shape[0], block_size), A.shape[2], A.shape[3], block_size)
        A_transpose = akg.tvm.compute(out_shape,
                                      lambda j0, j1, j2, j3, j4:
                                      A[j1 * block_size + j4, truncdiv(j0, block_size), j2, j3, truncmod(j0, block_size)],
                                      name=A.name + "_transposed")
        return A_transpose

    # Head is in 5D format
    # Output is in Fractal format
    def transpose_convert_head(Head):
        out_shape = ((floordiv(Head.shape[0].value, block_size)) * Head.shape[2].value * Head.shape[3].value,
                     Head.shape[1].value, block_size, block_size)
        tmp_6D_shape = (floordiv(Head.shape[0].value, block_size),
                        block_size, Head.shape[1].value, Head.shape[2].value, Head.shape[3].value, block_size)

        Head_6D = akg.topi.reshape(Head, tmp_6D_shape)
        # Transpose from (N//block_size_N, block_size_N, C//block_size_C, H, W, block_size_C)
        #           to   (N//block_size_N, H, W, C//block_size_C, block_size_C, block_size_N,)
        Head_6D_transpose = akg.topi.transpose(Head_6D, (0, 3, 4, 2, 5, 1))
        Head_transpose_convert = akg.topi.reshape(Head_6D_transpose, out_shape)
        return Head_transpose_convert


    X_transposed = transpose_data(A)
    pld_X_transposed = akg.tvm.placeholder(X_transposed.shape, name="inp0_transposed", dtype='float16')

    if (s_h > 1) or (s_w > 1):
        Head_transposed_converted = strided_head(HEAD, s_h, s_w)
    else:
        Head_transposed_converted = HEAD

    strided_head_n, strided_head_c1, strided_head_h, strided_head_w, strided_head_c0 = Head_transposed_converted.shape
    Head_transposed_converted = transpose_convert_head(Head_transposed_converted)

    s_transposed_converted = akg.tvm.create_schedule(Head_transposed_converted.op)

    pld_Head_transposed_converted = akg.tvm.placeholder(Head_transposed_converted.shape,
                                                        name="head_transposed",
                                                        dtype='float16')
    ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
    jacs = list(akg.differentiate(conv, [B], HEAD, ad_attrs, [pld_X_transposed, pld_Head_transposed_converted, None]))
    sjac = akg.tvm.create_schedule([jacs[0].op])

    op_vars = [HEAD, pld_X_transposed, pld_Head_transposed_converted, jacs[0]]
    in_n, in_c1, in_h, in_w, in_c0 = A.shape
    info = set_dims((in_c1.value * in_c0.value, in_n.value, in_h.value, in_w.value),
                    (strided_head_c1.value * strided_head_c0.value, strided_head_n.value,
                     strided_head_h.value, strided_head_w.value),
                    (0, 0), (1, 1), (1, 1),
                    tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)


    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod_backward2 = akg.build(sjac, op_vars, "cce",
                                  name="conv_backward_weight",
                                  attrs={"dim": str(info)},
                                  polyhedral=True)

    return mod_backward, mod_backward2
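The flip_weight helper above implements, directly in fractal layout, the classic transform used for the data gradient of a convolution: swap input and output channels and rotate the spatial window by 180 degrees. In plain NCHW the same transform is simply (a NumPy sketch for reference, not the fractal-layout code path):

import numpy as np

def flip_weight_nchw(w):
    # (Co, Ci, Kh, Kw) -> (Ci, Co, Kh, Kw) with the spatial window reversed
    return w.transpose(1, 0, 2, 3)[:, :, ::-1, ::-1]

w = np.arange(2 * 3 * 2 * 2, dtype=np.float16).reshape(2, 3, 2, 2)
print(flip_weight_nchw(w).shape)  # (3, 2, 2, 2)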
Example #8
def test_CCE_Conv(fmap_shape,
                  filter_shape,
                  pad_,
                  stride_,
                  tile_hh=0,
                  tile_coco=0,
                  tile_mm=0,
                  tile_kk=0,
                  tile_nn=0,
                  bypass_l1=False,
                  use_bias=False,
                  kernel_name="quant_conv",
                  cce_path='.'):
    # NOTE: block_size, conv_dtype, fusion, run_cce and h_window_cut are assumed
    # to be module-level globals of the original test file.
    # input shape (NCHW -> NC1HWC0)
    in_n, in_c, in_h, in_w = fmap_shape
    input_shape_nc1hwc0 = (in_n, in_c // block_size, in_h, in_w, block_size)
    # out_shape_nc1hwc0 = (in_n, in_c // 32, in_h, in_w, 32)
    in_n, in_c1, in_h, in_w, in_c0 = input_shape_nc1hwc0

    # kernel shape (NCHW -> NC1HWC0 -> Fractal)
    k_n, k_c, k_h, k_w = filter_shape
    kernel_shape_nc1hwc0 = (k_n, k_c // 32, k_h, k_w, 32)
    k_n, k_c1, k_h, k_w, k_c0 = kernel_shape_nc1hwc0
    kernel_shape_fractal = (k_c // 32 * k_h * k_w, k_n // 16, 16, 32)
    f_ko, f_no, f_ni, f_ki = kernel_shape_fractal

    # bias shape
    bias_shape_nc1hwc0 = (1, k_n // block_size, 1, 1, block_size)

    # padding ((padding_h, padding_w) -> (padding_top, padding_bottom, padding_left, padding_right))
    padding = (pad_[0], pad_[0], pad_[1], pad_[1])
    p_top, p_bottom, p_left, p_right = padding

    # stride (stride_h, stride_w)
    s_h, s_w = stride_

    # A placeholder (NC1HWC0)
    A = akg.tvm.placeholder(input_shape_nc1hwc0, dtype=conv_dtype, name='FMap')
    # B_placeholder (fractal)
    B = akg.tvm.placeholder(kernel_shape_fractal, dtype='int8', name='Filter')
    ScaleQ = akg.tvm.placeholder((16, ), dtype='float16', name='ScaleQ')
    OffsetQ = akg.tvm.placeholder((16, ), dtype='float16', name='OffsetQ')

    out_shape_nc1hwc0 = (in_n, in_c // 32, in_h, in_w, 32)
    q_n, q_c1, q_h, q_w, q_c0 = out_shape_nc1hwc0
    # print out_shape_nc1hwc0
    # Map the 32-deep quantized channel back onto A's 16-deep NC1HWC0 layout:
    # global channel = qc1 * 32 + qc0, so c1 = qc1 * 2 + qc0 // 16 and c0 = qc0 % 16.
    Quant = akg.tvm.compute(out_shape_nc1hwc0,
                            lambda qn, qc1, qh, qw, qc0:
                            (A[qn, qc1 * 2 + qc0 // 16, qh, qw, qc0 % 16] *
                             ScaleQ[0] + OffsetQ[0]).astype('int8'),
                            name='QuantOUT',
                            attrs={'no_inline': 1})

    if use_bias:
        bias_name = 'bias'
        bias_value = akg.tvm.placeholder(bias_shape_nc1hwc0,
                                         dtype=conv_dtype,
                                         name=bias_name)
    else:
        bias_name = 'None'

    # Create reduction variables
    kc1 = akg.tvm.reduce_axis((0, k_c1), name='kc1')
    kh = akg.tvm.reduce_axis((0, k_h), name='kh')
    kw = akg.tvm.reduce_axis((0, k_w), name='kw')
    kc0 = akg.tvm.reduce_axis((0, k_c0), name='kc0')

    out_h = (in_h + p_top + p_bottom - k_h) // (s_h) + 1
    tile_out_h = (tile_hh - k_h) // s_h + 1
    out_w = (in_w + p_left + p_right - k_w) // (s_w) + 1

    out_shape_nc1hwc0 = (in_n, k_n // block_size, out_h, out_w, block_size)
    out_n, out_c1, out_h, out_w, out_c0 = out_shape_nc1hwc0

    if (tile_coco > 0):
        c1_cut = tile_coco // block_size
    else:
        c1_cut = out_c1

    # set dim
    index = 0
    info = dim.Dim()
    if (q_c1 > 1):
        info.setdim(index=index, axis="KO", tilel1=q_c1, tilel0=q_c1)  # ko
    if (q_h > 1):
        info.setdim(index=index,
                    axis="C1",
                    tilel1=tile_out_h,
                    tilel0=tile_out_h)  # c1
    if (q_w > 1):
        info.setdim(index=index, axis="C0", tilel1=q_w, tilel0=q_w)  # c0
    if (q_c0 > 1):
        info.setdim(index=index, axis="KI", tilel1=q_c0, tilel0=q_c0)  # ki

    index += 1
    if (out_c1 > 1):
        info.setdim(index=index, axis="C1", tilel1=c1_cut, tilel0=0)  # c1
    if (out_h > 1):
        info.setdim(index=index, axis="H", tilel1=tile_out_h, tilel0=0)  # h
    if (out_w > 1):
        info.setdim(index=index, axis="W", tilel1=out_w, tilel0=0)  # w
    if (out_c0 > 1):
        info.setdim(index=index, axis="C0", tilel1=out_c0, tilel0=0)  # c0
    if (in_c1 > 1):
        info.setdim(index=index, axis="KC1", tilel1=in_c1 / 2, tilel0=0)  # kc1
    if (k_h > 1):
        info.setdim(index=index, axis="KH", tilel1=k_h, tilel0=0)  # kh
    if (k_w > 1):
        info.setdim(index=index, axis="KW", tilel1=k_w, tilel0=0)  # kw
    info = str(info)

    # Compute the convolution
    output_name = "output0"
    output_bias_name = "output1"

    # print out_shape_nc1hwc0
    C = akg.tvm.compute(
        out_shape_nc1hwc0,
        lambda n, c1, h, w, c0: akg.tvm.sum(akg.tvm.if_then_else(
            akg.tvm.any((h * s_h + kh) < p_top, (h * s_h + kh) >
                        (in_h + p_top - 1), (w * s_w + kw) < p_left,
                        (w * s_w + kw) >
                        (in_w + p_left - 1)), akg.tvm.const(0.0, 'int8'),
            Quant[n, kc1, (h * s_h + kh - p_top),
                  (w * s_w + kw - p_left), kc0]) * B[
                      (kc1 * k_h + kh) * k_w + kw, c1, c0, kc0],
                                            axis=[kc1, kh, kw, kc0]),
        name=output_name,
        attrs={
            "pragma_conv_kernel_n": k_n,
            "pragma_conv_kernel_h": k_h,
            "pragma_conv_kernel_w": k_w,
            "pragma_conv_padding_top": p_top,
            "pragma_conv_padding_bottom": p_bottom,
            "pragma_conv_padding_left": p_left,
            "pragma_conv_padding_right": p_right,
            "pragma_conv_dilation_h": 1,
            "pragma_conv_dilation_w": 1,
            "pragma_conv_bypass_l1": 1 if bypass_l1 else 0,
            "pragma_conv_stride_h": s_h,
            "pragma_conv_stride_w": s_w,
            "pragma_conv_fm_n": in_n,
            "pragma_conv_fm_c": in_c,
            "pragma_conv_fm_h": in_h,
            "pragma_conv_fm_w": in_w,
            "pragma_conv_h_cut": (h_window_cut - 1) * s_h + k_h,
            "pragma_conv_w_cut": (in_w + p_left + p_right),
            "pragma_conv_co_cut": c1_cut * k_c0,
            "pragma_conv_m_cut": tile_mm,
            "pragma_conv_k_cut": tile_kk,
            "pragma_conv_n_cut": tile_nn,
            "feature": Quant.op.name,
            "filter": B.op.name,
            "bias": bias_name,
            "res": output_name,
            "res_bias": output_bias_name
        })

    if use_bias:
        cube = akg.tvm.compute(out_shape_nc1hwc0,
                               lambda n, c1, h, w, c0: C[n, c1, h, w, c0] +
                               bias_value[0, c1, 0, 0, c0],
                               name=output_bias_name)
    else:
        cube = C

    if fusion:
        # leaky relu
        negative_slope = 0.0
        slope_tmp = akg.tvm.const(negative_slope, dtype=conv_dtype)
        # negative_slope*x
        out = akg.lang.cce.vmuls(cube, slope_tmp)
        # max(x,negative_slope*x)
        out = akg.lang.cce.vmax(out, cube)
    else:
        out = cube

    # schedule
    s = akg.tvm.create_schedule(out.op)
    # pragma_reschedule and the tiling spec travel in a single attrs dict;
    # the fusion and non-fusion paths build identical argument lists, so one
    # branch on use_bias suffices.
    attrs = {"pragma_reschedule": 1, "dim": info}
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        if use_bias:
            mod = akg.build(s, [A, B, ScaleQ, OffsetQ, bias_value, out],
                            "cce",
                            name=kernel_name,
                            attrs=attrs,
                            polyhedral=True)
        else:
            mod = akg.build(s, [A, B, ScaleQ, OffsetQ, out],
                            "cce",
                            name=kernel_name,
                            attrs=attrs,
                            polyhedral=True)
    source_code = mod.imported_modules[0].get_source()
    # print(source_code)
    # utils.create_code(kernel_name, cce_path, source_code)
    if run_cce:
        run_conv(mod, fmap_shape, filter_shape, pad_[0], stride_[0], use_bias)
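Numerically, the QuantOUT stage above is a single affine quantization followed by a truncating cast. A NumPy sketch of the per-element arithmetic (layout repacking omitted; the scale and offset values are illustrative):

import numpy as np

def quantize_fp16_to_int8(a, scale, offset):
    # int8(a * scale + offset), matching (A * ScaleQ[0] + OffsetQ[0]).astype('int8')
    return (a.astype(np.float16) * np.float16(scale)
            + np.float16(offset)).astype(np.int8)

a = np.array([0.5, -1.25, 1.5], dtype=np.float16)
print(quantize_fp16_to_int8(a, 64.0, 0.0))  # [ 32 -80  96]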
Example #9
def group_conv_forward(_n,
                       _h,
                       _w,
                       _c_i,
                       _c_o,
                       group,
                       _k_h,
                       _k_w,
                       _a,
                       _b,
                       bias_value,
                       pad_h,
                       pad_w,
                       _s_h,
                       _s_w,
                       cut_h,
                       cut_co,
                       cut_m,
                       cut_k,
                       cut_n,
                       block_size,
                       use_bias=False,
                       kernel_name='group_conv'):
    if (not isinstance(_n, int)):
        _n, _h, _w, _c_i, _c_o, group, _k_h, _k_w = expr_to_int(
            (_n, _h, _w, _c_i, _c_o, group, _k_h, _k_w))
        pad_h, pad_w, _s_h, _s_w = expr_to_int((pad_h, pad_w, _s_h, _s_w))
        cut_h, cut_co, cut_m, cut_k, cut_n, block_size = expr_to_int(
            (cut_h, cut_co, cut_m, cut_k, cut_n, block_size))

    conv_dtype = 'float16'

    if cut_h == _h:
        cut_h += pad_h + pad_h

    assert _c_o % group == 0 and _c_i % group == 0
    assert _c_o % block_size == 0 and (_c_i // group) % block_size == 0

    if (use_bias):
        bias = bias_value

    _o_h = (_h + 2 * pad_h - _k_h) // _s_h + 1
    _o_w = (_w + 2 * pad_w - _k_w) // _s_w + 1

    kc1 = akg.tvm.reduce_axis((0, _c_i // block_size // group), name='kc1')
    kh = akg.tvm.reduce_axis((0, _k_h), name='kh')
    kw = akg.tvm.reduce_axis((0, _k_w), name='kw')
    kc0 = akg.tvm.reduce_axis((0, block_size), name='kc0')

    p_top, p_bottom, p_left, p_right = pad_h, pad_h, pad_w, pad_w
    output_name = 'output'
    output_bias_name = 'output_bias'

    C = akg.tvm.compute(
        (_n, _c_o // block_size, _o_h, _o_w, block_size),
        lambda n, c1, h, w, c0: akg.lang.cce.mmad(akg.tvm.if_then_else(
            akg.tvm.any((h * _s_h + kh) < p_top, (h * _s_h + kh) >
                        (_h + p_top - 1), (w * _s_w + kw) < p_left,
                        (w * _s_w + kw) >
                        (_w + p_left - 1)), akg.tvm.const(0.0, conv_dtype),
            _a[n, c1 // ((_c_o // block_size) // group) *
               ((_c_i // block_size) // group) + kc1, (h * _s_h + kh - p_top),
               (w * _s_w + kw - p_left), kc0]) * _b[
                   (kc1 * _k_h + kh) * _k_w + kw, c1, c0, kc0],
                                                  axis=[kc1, kh, kw, kc0]),
        attrs={
            "pragma_conv_kernel_n": _c_o,
            "pragma_conv_kernel_h": _k_h,
            "pragma_conv_kernel_w": _k_w,
            "pragma_conv_padding_top": p_top,
            "pragma_conv_padding_bottom": p_bottom,
            "pragma_conv_padding_left": p_left,
            "pragma_conv_padding_right": p_right,
            "pragma_conv_bypass_l1": 1,
            "pragma_conv_stride_h": _s_h,
            "pragma_conv_stride_w": _s_w,
            "pragma_conv_fm_n": _n,
            "pragma_conv_fm_c": _c_i,
            "pragma_conv_fm_h": _h,
            "pragma_conv_fm_w": _w,
            "pragma_conv_dilation_h": 1,
            "pragma_conv_dilation_w": 1,
            "pragma_conv_h_cut": cut_h,
            "pragma_conv_w_cut": _w + 2 * pad_w,
            "pragma_conv_co_cut": cut_co,
            "pragma_conv_m_cut": cut_m,
            "pragma_conv_k_cut": cut_k,
            "pragma_conv_n_cut": cut_n,
            "feature": _a.op.name,
            "filter": _b.op.name,
            "bias": 'bias',
            "res": output_name,
            "res_bias": output_bias_name
        },
        name=output_name)

    if use_bias:
        out = akg.tvm.compute(
            C.shape,
            lambda n, c1, h, w, c0: C[n, c1, h, w, c0] + bias[0, c1, 0, 0, c0],
            name=output_bias_name)
        bufs = [_a, _b, bias, out]
    else:
        out = C
        bufs = [_a, _b, out]

    # create schedule for cce
    s = akg.tvm.create_schedule([out.op])

    # set dim
    info = set_dims_group(cut_h, cut_co, cut_m, cut_k, cut_n,
                          expr_to_int(out.shape), _c_i, _c_o, group, _k_h,
                          _k_w, _s_h, block_size)

    # build
    with akg.build_config(add_lower_pass=cce.debug_mode(0),
                          dump_pass_ir=False):
        mod = akg.build(s,
                        bufs,
                        "cce",
                        name=kernel_name,
                        attrs={"dim": info},
                        polyhedral=True)

    # Note: despite building mod, this returns the output tensor so that
    # group_conv_ad (Example #10) can differentiate through the forward graph.
    return out
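The output spatial sizes computed above follow the standard convolution formula; a tiny standalone helper for checking shapes (same arithmetic as _o_h and _o_w):

def conv_out_size(size, pad, kernel, stride):
    # floor((H + 2p - K) / s) + 1
    return (size + 2 * pad - kernel) // stride + 1

print(conv_out_size(56, 1, 3, 1))  # 56
print(conv_out_size(56, 0, 3, 2))  # 27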
Example #10
def group_conv_ad(_n,
                  _h,
                  _w,
                  _c_i,
                  _c_o,
                  group,
                  _k_h,
                  _k_w,
                  pad_h,
                  pad_w,
                  _s_h,
                  _s_w,
                  cut_h,
                  cut_co,
                  cut_m,
                  cut_k,
                  cut_n,
                  block_size,
                  use_bias=False,
                  kernel_name='group_conv'):
    conv_dtype = 'float16'
    _a = akg.tvm.placeholder((_n, _c_i // block_size, _h, _w, block_size),
                             name="input0",
                             dtype=conv_dtype)
    _b = akg.tvm.placeholder(((_c_i // group) // block_size * _k_h * _k_w,
                              _c_o // block_size, block_size, block_size),
                             name="input1",
                             dtype=conv_dtype)

    mod_forward = group_conv_forward(_n, _h, _w, _c_i, _c_o, group, _k_h, _k_w,
                                     _a, _b, None, pad_h, pad_w, _s_h, _s_w,
                                     cut_h, cut_co, cut_m, cut_k, cut_n,
                                     block_size)
    _o_h = mod_forward.shape[2].value
    _o_w = mod_forward.shape[3].value

    head = akg.tvm.placeholder(mod_forward.shape,
                               name="head",
                               dtype=conv_dtype)
    # (_n,_c_o,_o_h,_o_w)--(stride)-->(_n,_c_o,(_o_h-1)*_s_h+1,
    # (_o_w-1)*_s_w+1)--(5d)-->(_n,_c_o/16,(_o_h-1)*_s_h+1,(_o_w-1)*_s_w+1,16)
    pld_head_strided = akg.tvm.placeholder(
        (_n, _c_o // block_size, (_o_h - 1) * _s_h + 1,
         (_o_w - 1) * _s_w + 1, block_size),
        name="head_strided_5d",
        dtype=conv_dtype)

    # (_c_o,_c_i//group,_k_h,_k_w)--(flip)-->
    # (_c_i,_c_o//group,_k_h,_k_w)--(Fractal)-->((_c_o//group)/16*_k_h*_k_w, _c_i/16,16,16)
    pld_b_flipped = akg.tvm.placeholder(
        ((_c_o // group) // block_size * _k_h * _k_w, _c_i // block_size,
         block_size, block_size),
        name="b_flip",
        dtype=conv_dtype)

    # b in Fractal format; result in Fractal format
    b_group_flipped = group_flip_weight(_b, _k_h, _k_w, group,
                                        _c_o // group // block_size,
                                        _c_i // group // block_size,
                                        block_size)
    s_gr_fl = akg.tvm.create_schedule([b_group_flipped.op])
    info = dim.Dim()
    info.setdim(index=0, axis=0, tilel1=1, tilel0=1)
    info.setdim(index=0, axis=1, tilel1=1, tilel0=1)
    info.setdim(index=0, axis=2, tilel1=1, tilel0=1)
    info.setdim(index=0, axis=3, tilel1=1, tilel0=1)

    with akg.build_config(add_lower_pass=cce.debug_mode(0),
                          dump_pass_ir=False):
        mod_b_group_flip = akg.build(s_gr_fl, [_b, b_group_flipped],
                                     "cce",
                                     name="b_group_flip",
                                     attrs={"dim": str(info)},
                                     polyhedral=True)

    head_strided = strided_head(head, _s_h, _s_w)
    s_striding = akg.tvm.create_schedule(head_strided.op)

    with akg.build_config(add_lower_pass=cce.debug_mode(0),
                          dump_pass_ir=False):
        mod_head_strided = akg.build(s_striding, [head, head_strided],
                                     "cce",
                                     name="h_strided",
                                     attrs={"dim": str(info)},
                                     polyhedral=True)

    a_transposed = transpose_regroup(_a, block_size, group)
    s_transposed_nc = akg.tvm.create_schedule(a_transposed.op)
    info = dim.Dim()
    info.setdim(index=0, axis=0, tilel1=16, tilel0=16)
    info.setdim(index=0, axis=1, tilel1=1, tilel0=1)
    info.setdim(index=0, axis=2, tilel1=1, tilel0=1)
    info.setdim(index=0, axis=3, tilel1=1, tilel0=1)

    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod_transposed_nc = akg.build(s_transposed_nc, [_a, a_transposed],
                                      "cce",
                                      name="a_transposed",
                                      attrs={"dim": str(info)},
                                      polyhedral=True)

    head_transposed_convert = transpose_convert_head(head, block_size)
    s_transposed_convert = akg.tvm.create_schedule(head_transposed_convert.op)
    info = dim.Dim()
    info.setdim(index=0, axis=0, tilel1=1, tilel0=1)
    info.setdim(index=0, axis=1, tilel1=1, tilel0=1)
    info.setdim(index=0, axis=2, tilel1=1, tilel0=1)
    info.setdim(index=0, axis=3, tilel1=1, tilel0=1)

    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        # use a distinct kernel name for this transform (not "a_transposed",
        # which already names the previous build)
        mod_transposed_convert = akg.build(s_transposed_convert,
                                           [head, head_transposed_convert],
                                           "cce",
                                           name="head_transposed_convert",
                                           attrs={"dim": str(info)},
                                           polyhedral=True)

    # Begin with the ad kernels
    ad_attrs = {"ad_conv_enable": 1}
    _jacs_data = list(
        akg.differentiate(mod_forward, [_a], head, ad_attrs,
                          [pld_head_strided, pld_b_flipped, None]))

    cut_h_e, cut_co_e, cut_m_e, cut_k_e, cut_n_e = ((_o_h - 1) * _s_h + 1 + 2 *
                                                    (_k_h - 1 - pad_h), 16,
                                                    _h * _w, 48, 16)
    cut_m_e = ((cut_m_e + block_size - 1) // block_size) * block_size

    info = set_dims_group(cut_h_e, cut_co_e, cut_m_e, cut_k_e, cut_n_e,
                          expr_to_int(_a.shape), _c_o, _c_i, group, _k_h, _k_w,
                          _s_h, block_size)

    s_data = akg.tvm.create_schedule([_jacs_data[0].op])
    # low_data = akg.lower(s_data, [pld_head_strided, pld_b_flipped, _jacs_data[0]], simple_mode=True)

    with akg.build_config(add_lower_pass=cce.debug_mode(0),
                          dump_pass_ir=False):
        mod_ad_data = akg.build(
            s_data, [pld_head_strided, pld_b_flipped, _jacs_data[0]],
            "cce",
            name="conv_ad_data",
            attrs={"dim": info},
            polyhedral=True)

    # (_n,_c_i,_h,_w)--(trans)-->(_c_i,_n,_h,_w)--(regroup)-->
    # (_c_i//group,_n*group,_h,_w)--(5d)-->(_c_i//group,(_n*group)/16,_h,_w,16)
    pld_x_trans = akg.tvm.placeholder(
        (_c_i // group, (_n * group) // block_size, _h, _w, block_size),
        name="x_trans_5d",
        dtype=conv_dtype)

    # (_n,_c_o,_o_h,_o_w)--(trans)-->
    # (_c_o,_n,_o_h,_o_w)--(Fractal)-->(_n/16*_o_h*_o_w, _c_o/16,16,16)
    pld_head_trans_converted = akg.tvm.placeholder(
        (_n // block_size * _o_h * _o_w, _c_o // block_size, block_size,
         block_size),
        name="head_trans_convert",
        dtype=conv_dtype)

    # ad_attrs = {"ad_conv_enable": 1}
    _jacs_weights = list(
        akg.differentiate(mod_forward, [_b], head, ad_attrs,
                          [pld_x_trans, pld_head_trans_converted, None]))

    cut_h_e, cut_co_e, cut_m_e, cut_k_e, cut_n_e = (_h + 2 * pad_h, 16,
                                                    _k_h * _k_w, 48, 16)
    cut_m_e = ((cut_m_e + block_size - 1) // block_size) * block_size

    info = set_dims_group(
        cut_h_e, cut_co_e, cut_m_e, cut_k_e, cut_n_e,
        (_c_i // group, _c_o // block_size, _k_h, _k_w, block_size),
        _n * group, _c_o, group, _o_h, _o_w, 1, block_size)

    s_weights = akg.tvm.create_schedule([_jacs_weights[0].op])

    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod_ad_weights = akg.build(
            s_weights,
            [pld_x_trans, pld_head_trans_converted, _jacs_weights[0]],
            "cce",
            name="conv_ad_weights",
            attrs={"dim": info},
            polyhedral=True)

    print("Forward input data shape: ", _a.shape)
    print("Forward input weight shape: ", _b.shape)
    print("Forward output shape: ", mod_forward.shape)
    print("Backward wrt. DATA input data shape: ", pld_head_strided.shape)
    print("Backward wrt. DATA input weight shape: ", pld_b_flipped.shape)
    print("Backward wrt. DATA output shape: ", _jacs_data[0].shape)
    print("Backward wrt. WEIGHT input data shape: ", pld_x_trans.shape)
    print("Backward wrt. WEIGHT input weight shape: ",
          pld_head_trans_converted.shape)
    print("Backward wrt. WEIGHT output shape: ", _jacs_weights[0].shape)

    return mod_ad_data, mod_ad_weights, mod_b_group_flip, mod_head_strided, mod_transposed_nc, mod_transposed_convert
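A hypothetical invocation (all sizes are illustrative; they satisfy the divisibility asserts in group_conv_forward, i.e. _c_o % block_size == 0 and (_c_i // group) % block_size == 0):

mods = group_conv_ad(_n=16, _h=14, _w=14, _c_i=64, _c_o=64, group=2,
                     _k_h=3, _k_w=3, pad_h=1, pad_w=1, _s_h=1, _s_w=1,
                     cut_h=14, cut_co=16, cut_m=16, cut_k=16, cut_n=16,
                     block_size=16)
mod_ad_data, mod_ad_weights, mod_b_group_flip, mod_head_strided, \
    mod_transposed_nc, mod_transposed_convert = mods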
Example #11
def psroialign_compute(fm_shape, roi_shape, class_num, group_size, sample_h,
                       sample_w, scale):
    '''
    :param fm_shape:   (n, c_dim, h, w), where c_dim = group_size * group_size * (class_num + 1)
    :param roi_shape:  (roi_num, 16, 1, 1); dim C carries 5 values (score, x1, y1, x2, y2),
                       and the remaining 11 entries are padding
    :param class_num:
    :param group_size:
    :param sample_h:
    :param sample_w:
    :param scale:
    :return:
    '''

    dtype = "float16"

    fm_data = akg.tvm.placeholder(fm_shape, name="fm_data", dtype=dtype)
    roi_data = akg.tvm.placeholder(roi_shape, name="roi_data", dtype=dtype)
    scale_const = akg.tvm.const(scale, dtype=dtype)

    sample_h_const = akg.tvm.const(sample_h, "int32")
    sample_w_const = akg.tvm.const(sample_w, "int32")
    two_const = akg.tvm.const(2, "float16")
    one_const = akg.tvm.const(1, "float16")
    group_size_const = akg.tvm.const(group_size, "int32")

    bin_num = group_size * group_size

    # ==============================================================
    # step 1: scale coordinates size in original image to size in feature map
    # ==============================================================

    COSIZE = 16
    roi_num = roi_shape[0]
    aligned_roi_num = do_align(roi_num, COSIZE)

    # 4 means x1, y1, x2, y2
    # roi_shape[0] must be equal to COSIZE
    scaled_coors = akg.tvm.compute(
        (4, aligned_roi_num, 1, 1),
        lambda n, c, h, w: roi_data[c, 1 + n, h, w] * scale_const,
        name='scaled_coors')

    # ==============================================================
    # step 2: compute the width and height of roi
    # ==============================================================

    # 2 stands for width and height
    width_height_shape = (2, aligned_roi_num, 1, 1)
    width_height_of_rois = akg.tvm.compute(
        width_height_shape,
        lambda n, c, h, w: scaled_coors[n + 2, c, h, w] - scaled_coors[n, c, h,
                                                                       w],
        name='width_height_of_rois')

    width_shape = (aligned_roi_num, )
    width_of_rois = akg.tvm.compute(
        width_shape,
        lambda n: scaled_coors[2, n, 0, 0] - scaled_coors[0, n, 0, 0],
        name='width_of_rois')
    # height is y2 - y1 (scaled_coors rows are ordered x1, y1, x2, y2)
    height_of_rois = akg.tvm.compute(
        width_shape,
        lambda n: scaled_coors[3, n, 0, 0] - scaled_coors[1, n, 0, 0],
        name='height_of_rois')

    # ==============================================================
    # step 3: compute the bias of the coordinates of all samples
    # ==============================================================

    # samples_shape = (aligned_roi_num, bin_num, sample_h, sample_w)

    # unit_nums = akg.tvm.compute((2,), lambda i: two_const * group_size_const \
    #                                         * akg.tvm.expr.Select(i == 0, sample_w_const, sample_h_const), name = 'uint_nums')

    # width_height_of_rois(0, x, x, x) holds the width of a single sampling unit
    # and width_height_of_rois(1, x, x, x) the height
    # unit_lengths = akg.tvm.compute(width_height_shape, lambda n, c, h, w: width_height_of_rois(n, c, h, w) / unit_nums(n), \
    #                            name = 'uint_lengths')

    unit_w_lengths = akg.tvm.compute(
        width_shape,
        lambda n: width_of_rois(n) / sample_w_const * group_size_const,
        name='unit_w_lengths')
    unit_h_lengths = akg.tvm.compute(
        width_shape,
        lambda n: height_of_rois(n) / sample_h_const * group_size_const,
        name='unit_h_lengths')

    # samples_coors_x_shape = (aligned_roi_num, 1, group_size * sample_h, group_size * sample_w)
    # samples_x_coors_bias = akg.tvm.compute(samples_coors_x_shape, lambda n, c, h, w: unit_w_lengths[n] * \
    #                                         (one_const + w * two_const), name = 'samples_x_coors_bias')
    #
    # samples_y_coors_bias = akg.tvm.compute(samples_coors_x_shape, lambda n, c, h, w: unit_h_lengths[n] * \
    #                                         (one_const + w * two_const), name = 'samples_y_coors_bias')
    #
    # samples_x_coors = akg.tvm.compute(samples_coors_x_shape, lambda n, c, h, w: \
    #     samples_x_coors_bias(n, c, h, w) + scaled_coors(1, c, 1, 1), name = 'samples_x_coors')
    # samples_y_coors = akg.tvm.compute(samples_coors_x_shape, lambda n, c, h, w: \
    #     samples_y_coors_bias(n, c, h, w) + scaled_coors(2, c, 1, 1), name = 'samples_y_coors')

    sample_w_bias_shape = (1, group_size, sample_w, aligned_roi_num)
    # sample_w_bias = akg.tvm.compute(sample_w_bias_shape, lambda n, c, h, w: unit_w_lengths[w] * \
    #                                 (one_const + two_const * (c * sample_w_const + h)), name = 'samples_w_bias')
    # sample_w_bias = akg.tvm.compute(sample_w_bias_shape, lambda n, c, h, w: unit_w_lengths[w] * \
    #                                   (one_const + two_const * (sample_w_const)), name = 'samples_w_bias')

    sample_h_bias_shape = (1, group_size, sample_h, aligned_roi_num)
    # sample_h_bias = akg.tvm.compute(sample_h_bias_shape, lambda n, c, h, w: unit_h_lengths[w] * \
    #                                 (one_const + two_const * (c * sample_h_const + h)), name = 'samples_h_bias')
    # sample_h_bias = akg.tvm.compute(sample_h_bias_shape, lambda n, c, h, w: unit_h_lengths[w] * \
    #                                   (one_const + two_const * (sample_h_const)), name = 'samples_h_bias')

    @akg.tvm.hybrid.script(capture=locals())
    def gen_bias(h_value, unit_lengths, ratio):
        output = output_tensor((1, group_size, h_value, aligned_roi_num),
                               'float16')

        strides = allocate((aligned_roi_num, ), 'float16', 'local')
        for w in range(0, aligned_roi_num):
            strides[w] = half(0.0)

        for c in range(0, group_size):
            for h in range(0, 1):
                for w in range(0, aligned_roi_num):
                    output[0, c, h, w] = unit_lengths[w]
                    # strides[w] += unit_lengths[w] * ratio * half(h_value)

            for h in range(1, h_value):
                for w in range(0, aligned_roi_num):
                    output[0, c, h, w] = output[0, c, h - 1,
                                                w] + ratio * unit_lengths[w]

        return output

    sample_w_bias = gen_bias(sample_w_const, unit_w_lengths, two_const)
    sample_h_bias = gen_bias(sample_h_const, unit_h_lengths, two_const)

    samples_x_coors = akg.tvm.compute(
        sample_w_bias_shape,
        lambda n, c, h, w: sample_w_bias(n, c, h, w) + scaled_coors(
            0, w, 0, 0),
        name='samples_x_coors')

    samples_y_coors = akg.tvm.compute(
        sample_h_bias_shape,
        lambda n, c, h, w: sample_h_bias(n, c, h, w) + scaled_coors(
            1, w, 0, 0),
        name='samples_y_coors')

    # ==============================================================
    # step 4: compute the low and high coordinates of samples for bilinear
    # ==============================================================
    # samples_x_coors_low = akg.tvm.compute(sample_w_bias_shape, lambda *indices: \
    #     akg.lang.cce.floor(samples_x_coors(*indices)), name = 'samples_x_coors_low')
    # samples_x_coors_high = akg.tvm.compute(sample_w_bias_shape, lambda *indices: \
    #     akg.lang.cce.ceil(samples_x_coors(*indices)), name = 'samples_x_coors_high')
    # samples_y_coors_low = akg.tvm.compute(sample_h_bias_shape, lambda *indices: \
    #     akg.lang.cce.floor(samples_y_coors(*indices)), name = 'samples_y_coors_low')
    # samples_y_coors_high = akg.tvm.compute(sample_h_bias_shape, lambda *indices: \
    #     akg.lang.cce.ceil(samples_y_coors(*indices)), name = 'samples_y_coors_high')
    samples_x_coors_low = akg.lang.cce.floor(samples_x_coors)
    samples_x_coors_high = akg.lang.cce.ceil(samples_x_coors)
    samples_y_coors_low = akg.lang.cce.floor(samples_y_coors)
    samples_y_coors_high = akg.lang.cce.ceil(samples_y_coors)

    # samples_x_coors_low = akg.tvm.compute(sample_w_bias_shape, lambda *indices: \
    #     akg.topi.cast(samples_x_coors(*indices), 'int32'), name = 'samples_x_coors_low')
    # samples_x_coors_high = akg.tvm.compute(sample_w_bias_shape, lambda *indices: \
    #     samples_x_coors_low(*indices) + akg.topi.cast(one_const, 'int32'), name = 'samples_x_coors_high')
    # samples_y_coors_low = akg.tvm.compute(sample_h_bias_shape, lambda *indices: \
    #     akg.topi.cast(samples_y_coors(*indices), 'int32'), name = 'samples_y_coors_low')
    # samples_y_coors_high = akg.tvm.compute(sample_h_bias_shape, lambda *indices: \
    #     samples_y_coors_low(*indices) + akg.topi.cast(one_const, 'int32'), name = 'samples_y_coors_high')

    # ==============================================================
    # step 5: compute the weight of low and high coordinates for bilinear
    # ==============================================================
    # wlx = akg.tvm.compute(samples_coors_x_shape, lambda *indices: samples_x_coors_high(*indices) - samples_x_coors(*indices))
    # whx = akg.tvm.compute(samples_coors_x_shape, lambda *indices: one_const - wlx(*indices))
    #
    # wly = akg.tvm.compute(samples_coors_x_shape, lambda *indices: samples_y_coors_high(*indices) - samples_y_coors(*indices))
    # why = akg.tvm.compute(samples_coors_x_shape, lambda *indices: one_const - wly(*indices))
    #
    # wlxXwly = akg.tvm.compute(samples_coors_x_shape, lambda *indices: wlx(*indices) * wly(*indices))
    # whxXwly = akg.tvm.compute(samples_coors_x_shape, lambda *indices: whx(*indices) * wly(*indices))
    # wlxXwhy = akg.tvm.compute(samples_coors_x_shape, lambda *indices: wlx(*indices) * why(*indices))
    # whxXwhy = akg.tvm.compute(samples_coors_x_shape, lambda *indices: whx(*indices) * why(*indices))

    wlx = akg.tvm.compute(sample_w_bias_shape,
                          lambda *indices: samples_x_coors_high(*indices) -
                          samples_x_coors(*indices),
                          name='wlx')
    whx = akg.tvm.compute(sample_w_bias_shape,
                          lambda *indices: one_const - wlx(*indices),
                          name='whx')

    wly = akg.tvm.compute(sample_h_bias_shape,
                          lambda *indices: samples_y_coors_high(*indices) -
                          samples_y_coors(*indices),
                          name='wly')
    why = akg.tvm.compute(sample_h_bias_shape,
                          lambda *indices: one_const - wly(*indices),
                          name='why')
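
    # wlx + whx == 1 and wly + why == 1, so the four broadcast products below
    # form a convex combination: they sum to 1 at every sample point.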

    samples_shape = (group_size, group_size, sample_h, sample_w,
                     aligned_roi_num)
    wlxXwly = akg.tvm.compute(
        samples_shape,
        lambda i, j, m, n, k: wlx(0, j, n, k) * wly(0, i, m, k),
        name='wlxXwly')
    whxXwly = akg.tvm.compute(
        samples_shape,
        lambda i, j, m, n, k: whx(0, j, n, k) * wly(0, i, m, k),
        name='whxXwly')
    wlxXwhy = akg.tvm.compute(
        samples_shape,
        lambda i, j, m, n, k: wlx(0, j, n, k) * why(0, i, m, k),
        name='wlxXwhy')
    whxXwhy = akg.tvm.compute(
        samples_shape,
        lambda i, j, m, n, k: whx(0, j, n, k) * why(0, i, m, k),
        name='whxXwhy')

    boundaries_values_shape = (4, sample_h, sample_w, aligned_roi_num)
    bin_values_shape = (1, class_num + 1, bin_num, aligned_roi_num)
    gap_values_shape = (class_num + 1, aligned_roi_num)

    @akg.tvm.hybrid.script
    def fetch_data(shape, fm_in, c_idx, bin_idx, bin_num, group_size, sample_h,
                   sample_w, roi_num, x_low, x_high, y_low, y_high, one_value):
        boundaries_values = output_tensor(shape, 'float16')

        for i in range(0, sample_h):
            for j in range(0, sample_w):
                for k in range(0, roi_num):
                    # assume batch is 1
                    w_low_idx = x_low[0, bin_idx % group_size, j, k]
                    w_high_idx = x_high[0, bin_idx % group_size, j, k]

                    h_low_idx = y_low[0, bin_idx // group_size, i, k]
                    h_high_idx = y_high[0, bin_idx // group_size, i, k]

                    # (x_low, y_low) corner
                    boundaries_values[0, i, j, k] = fm_in[0, c_idx * bin_num + bin_idx, h_low_idx, w_low_idx]
                    # (x_high, y_low) corner
                    boundaries_values[1, i, j, k] = fm_in[0, c_idx * bin_num + bin_idx, h_low_idx, w_high_idx]
                    # (x_low, y_high) corner
                    boundaries_values[2, i, j, k] = fm_in[0, c_idx * bin_num + bin_idx, h_high_idx, w_low_idx]
                    # (x_high, y_high) corner
                    boundaries_values[3, i, j, k] = fm_in[0, c_idx * bin_num + bin_idx, h_high_idx, w_high_idx]

        return boundaries_values

    @akg.tvm.hybrid.script(capture=locals())
    def compute_bilinear_maxpool_gap(fm_in, x_low, x_high, y_low, y_high,
                                     wlxXwly_, whxXwly_, wlxXwhy_, whxXwhy_,
                                     one_value):

        bin_values = allocate(bin_values_shape, 'float16', 'local')

        # global average result
        gap_values = output_tensor(gap_values_shape, 'float16')

        for c in range(0, class_num + 1):
            for b in range(0, bin_num):
                boundaries_values = fetch_data(boundaries_values_shape, fm_in,
                                               c, b, bin_num, group_size,
                                               sample_h, sample_w, roi_num,
                                               x_low, x_high, y_low, y_high,
                                               one_value)

                k_w = b % group_size
                k_h = b // group_size

                # max-pool accumulator; the zero init assumes non-negative inputs
                for n in range(0, roi_num):
                    bin_values[0, c, b, n] = half(0.0)

                for h in range(0, sample_h):
                    for w in range(0, sample_w):
                        for n in range(0, roi_num):
                            # bilinear
                            tmp = boundaries_values[0, h, w, n] * wlxXwly_[k_h, k_w, h, w, n] + \
                                boundaries_values[1, h, w, n] * whxXwly_[k_h, k_w, h, w, n] + \
                                boundaries_values[2, h, w, n] * wlxXwhy_[k_h, k_w, h, w, n] + \
                                boundaries_values[3, h, w, n] * whxXwhy_[k_h, k_w, h, w, n]

                            # maxpooling
                            if tmp > bin_values[0, c, b, n]:
                                bin_values[0, c, b, n] = tmp

            # global average pooling
            for j in range(0, roi_num):
                tmp1 = bin_values[0, c, 0, j]
                for k in range(1, bin_num):
                    tmp1 += bin_values[0, c, k, j]

                gap_values[c, j] = tmp1 / bin_num

        return gap_values

    # ==============================================================
    # step 6: compute results of bilinear, maxpooling and global average pooling
    # ==============================================================
    out = compute_bilinear_maxpool_gap(fm_data, samples_x_coors_low,
                                       samples_x_coors_high,
                                       samples_y_coors_low,
                                       samples_y_coors_high, wlxXwly, whxXwly,
                                       wlxXwhy, whxXwhy, one_const)

    s = akg.tvm.create_schedule(out.op)
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s, [fm_data, roi_data, out],
                        "cce",
                        name="psroialign",
                        polyhedral=True)

    return mod
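
# For cross-checking: a minimal NumPy sketch of the per-bin math that
# compute_bilinear_maxpool_gap performs (bilinear sample, max-pool inside each
# bin, then a global average over bins). All names and shapes in this sketch
# are illustrative assumptions, not part of the kernel above.
import numpy as np

def bilinear_maxpool_gap_ref(fm, x_low, x_high, y_low, y_high,
                             wll, whl, wlh, whh, class_num, group_size,
                             sample_h, sample_w, roi_num):
    # fm: (1, (class_num + 1) * bin_num, H, W) feature map
    # x_*/y_*: integer sample indices shaped like the kernel's low/high tensors
    # w??: (group_size, group_size, sample_h, sample_w, roi_num) weights
    bin_num = group_size * group_size
    gap = np.zeros((class_num + 1, roi_num), dtype=fm.dtype)
    for c in range(class_num + 1):
        for b in range(bin_num):
            kw, kh = b % group_size, b // group_size
            best = np.zeros(roi_num, dtype=fm.dtype)  # max-pool accumulator
            ch = c * bin_num + b
            for h in range(sample_h):
                for w in range(sample_w):
                    for n in range(roi_num):
                        v = (fm[0, ch, y_low[0, kh, h, n], x_low[0, kw, w, n]] * wll[kh, kw, h, w, n]
                             + fm[0, ch, y_low[0, kh, h, n], x_high[0, kw, w, n]] * whl[kh, kw, h, w, n]
                             + fm[0, ch, y_high[0, kh, h, n], x_low[0, kw, w, n]] * wlh[kh, kw, h, w, n]
                             + fm[0, ch, y_high[0, kh, h, n], x_high[0, kw, w, n]] * whh[kh, kw, h, w, n])
                        best[n] = max(best[n], v)  # max-pool inside the bin
            gap[c] += best
        gap[c] /= bin_num  # global average pooling over the bins
    return gap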
Example #12
def group_conv(N,
               H,
               W,
               CI,
               CO,
               group,
               KH,
               KW,
               PAD_H,
               PAD_W,
               SH,
               SW,
               cutH,
               cutCo,
               cutM,
               cutK,
               cutN,
               block_size,
               use_bias=False,
               kernel_name='conv'):
    """
    split channels of FeatureMap to some groups,every group has its filter-kernel

    Args:
        args1:a list,the size is 3 if use_bias else the size is 2;
              data[0] akg.tvm.Tensor of type float16 ,shape 5D(N, CI//C0, C0, H, W)
              data[1] akg.tvm.Tensor of type float16 ,shape 6D(CI//(CI//C0)//C0, KH, KW, k_ch*CI//C0, C0, C0)
              data[2] akg.tvm.Tensor of type float16 ,shape 5D(N, CI*k_ch//C0, OH, OW, C0)
        N:batchsize
        H:height of featureMap
        W:width of featureMap
        CI:channel of featureMap
        C0:num of Filters
        group:num of spliting channels of FeatureMap
        KH:height of Filter
        KW:width of Filter
        PAD_H:padding pixels in vertical direction
        PAD_W:padding pixels in horizontal direction
        SH:stride in vertical direction
        SW:stride in horizontal direction
        block_size:a int var
        use_bias:a bool value
    Returns:
        akg.tvm.Tensor of same type as data, shape is 5D(N, C0//block_size, block_size, OH, OW)
    """

    conv_dtype = "float16"

    # if the H tile covers the whole feature map, enlarge it to include padding
    if cutH == H:
        cutH += PAD_H + PAD_H

    assert CO % group == 0 and CI % group == 0
    assert CO % block_size == 0 and (CI // group) % block_size == 0

    # (N, CI, H, W) -> (N, C1, H, W, C0), where C1 = CI // block_size and C0 = block_size
    A = akg.tvm.placeholder((N, CI // block_size, H, W, block_size),
                            dtype=conv_dtype,
                            name="A")
    # (CO, CI // group, KH, KW) -> (CI // group // block * KH * KW, CO // block, block, block)
    B = akg.tvm.placeholder((CI // group // block_size * KH * KW,
                             CO // block_size, block_size, block_size),
                            dtype=conv_dtype,
                            name="B")

    bias = akg.tvm.placeholder((1, CO // block_size, 1, 1, block_size),
                               dtype=conv_dtype,
                               name="bias")

    OH = (H + 2 * PAD_H - KH) // SH + 1
    OW = (W + 2 * PAD_W - KW) // SW + 1
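    # e.g. H = 28, PAD_H = 1, KH = 3, SH = 1  ->  OH = (28 + 2 - 3) // 1 + 1 = 28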

    kc1 = akg.tvm.reduce_axis((0, CI // block_size // group), name="kc1")
    kh = akg.tvm.reduce_axis((0, KH), name="kh")
    kw = akg.tvm.reduce_axis((0, KW), name="kw")
    kc0 = akg.tvm.reduce_axis((0, block_size), name="kc0")

    p_top, p_bottom, p_left, p_right = PAD_H, PAD_H, PAD_W, PAD_W
    output_name = "output"
    output_bias_name = "output_bias"
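
    # The im2col-style mmad below zero-pads taps that fall outside the image
    # and maps each output-channel block c1 to the input-channel blocks of its
    # own group via c1 // ((CO // block_size) // group) * ((CI // block_size) // group) + kc1.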

    C = akg.tvm.compute(
        (N, CO // block_size, OH, OW, block_size),
        lambda n, c1, h, w, c0: akg.lang.cce.mmad(akg.tvm.if_then_else(
            akg.tvm.any((h * SH + kh) < p_top, (h * SH + kh) > (H + p_top - 1),
                        (w * SW + kw) < p_left, (w * SW + kw) >
                        (W + p_left - 1)), akg.tvm.const(0.0, conv_dtype),
            A[n, c1 // ((CO // block_size) // group) * (
                (CI // block_size) // group) + kc1, (h * SH + kh - p_top),
              (w * SW + kw - p_left), kc0]) * B[
                  (kc1 * KH + kh) * KW + kw, c1, c0, kc0],
                                                  axis=[kc1, kh, kw, kc0]),
        attrs={
            "pragma_conv_kernel_n": CO,
            "pragma_conv_kernel_h": KH,
            "pragma_conv_kernel_w": KW,
            "pragma_conv_padding_top": p_top,
            "pragma_conv_padding_bottom": p_bottom,
            "pragma_conv_padding_left": p_left,
            "pragma_conv_padding_right": p_right,
            "pragma_conv_bypass_l1": 1,
            "pragma_conv_stride_h": SH,
            "pragma_conv_stride_w": SW,
            "pragma_conv_fm_n": N,
            "pragma_conv_fm_c": CI,
            "pragma_conv_fm_h": H,
            "pragma_conv_fm_w": W,
            "pragma_conv_dilation_h": 1,
            "pragma_conv_dilation_w": 1,
            "pragma_conv_h_cut": cutH,
            "pragma_conv_w_cut": W + 2 * PAD_W,
            "pragma_conv_co_cut": cutCo,
            "pragma_conv_m_cut": cutM,
            "pragma_conv_k_cut": cutK,
            "pragma_conv_n_cut": cutN,
            "feature": A.op.name,
            "filter": B.op.name,
            "bias": bias.op.name,
            "res": output_name,
            "res_bias": output_bias_name
        },
        name=output_name)

    if use_bias:
        out = akg.tvm.compute(
            C.shape,
            lambda n, c1, h, w, c0: C[n, c1, h, w, c0] + bias[0, c1, 0, 0, c0],
            name=output_bias_name)
        bufs = [A, B, bias, out]
    else:
        out = C
        bufs = [A, B, out]

    # create schedule for cce
    s = akg.tvm.create_schedule([out.op])

    # set cut / tiling
    out_n, out_c1, out_h, out_w, out_c0 = akg.topi.util.get_const_tuple(
        out.shape)

    # set dim
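    # tile_out_h: output rows produced by one cutH-tall input tile
    # (same arithmetic as OH above)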
    tile_out_h = (cutH - KH) // SH + 1

    info = dim.Dim()
    if (out_n > 1):
        info.setdim(index=0, axis=0, tilel1=1, tilel0=0)  # n
    if (out_c1 > 1):
        info.setdim(index=0, axis=0, tilel1=cutCo // block_size,
                    tilel0=0)  # c1
    if (out_h > 1):
        info.setdim(index=0, axis='H', tilel1=tile_out_h, tilel0=0)  # h
    if (out_w > 1):
        info.setdim(index=0, axis=3, tilel1=out_w, tilel0=0)  # w
    if (out_c0 > 1):
        info.setdim(index=0, axis=4, tilel1=out_c0, tilel0=0)  # c0
    # only one kc1 block per group is currently supported; the branch below
    # covers the general case but is unreachable while this assert holds
    assert CI // block_size // group == 1
    if (CI // block_size // group > 1):
        info.setdim(index=0,
                    axis=5,
                    tilel1=CI // block_size // group,
                    tilel0=0)  # kc1
    if (KH > 1):
        info.setdim(index=0, axis=5, tilel1=KH, tilel0=0)  # kh
    if (KW > 1):
        info.setdim(index=0, axis=5, tilel1=KW, tilel0=0)  # kw

    # build
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s,
                        bufs,
                        "cce",
                        name=kernel_name,
                        attrs={"dim": str(info)},
                        polyhedral=True)

    return OH, OW, A, B, C, mod
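
# A minimal invocation sketch. Every concrete size below is an illustrative
# assumption (not from the source), chosen so the asserts hold:
# CO % group == 0, (CI // group) % block_size == 0 and
# CI // block_size // group == 1.
if __name__ == "__main__":
    oh, ow, a_buf, b_buf, c_out, mod = group_conv(
        N=1, H=28, W=28, CI=32, CO=32, group=2,
        KH=3, KW=3, PAD_H=1, PAD_W=1, SH=1, SW=1,
        cutH=28, cutCo=16, cutM=16, cutK=16, cutN=16,
        block_size=16, use_bias=False, kernel_name="group_conv_demo")
    print(oh, ow)  # 28 28 with "same" padding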