def smooth_l1_loss_grad_run(shape, dtype, attrs=None, kernel_name="smooth_l1_loss_grad"):
    assert len(shape) >= 2, "last dimension of the shape will be reduced, so the shape length should be >= 2"
    sample_shape = shape[:-1]

    anchor_samples_dtype = "int32"
    # sigma is a constant parameter
    sigma = 1.0
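    # For reference, a standard smooth-L1 (Huber-style) gradient formulation
    # (assumed here, not read from the op implementation): with
    # x = prediction - target,
    #   dL/dx = sigma^2 * x   if |x| < 1 / sigma^2
    #   dL/dx = sign(x)       otherwise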
    anchor_sample_correct = 0

    if attrs is None:
        attrs = {}
    if not utils.product_is_mini():
        attrs['enable_align_fix'] = True
        attrs['enable_multicore'] = True

    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(smooth_l1_loss_grad.smooth_l1_loss_grad, [sample_shape, shape, shape, sample_shape],
                                  [dtype, dtype, dtype, anchor_samples_dtype], op_attrs=[sigma, anchor_sample_correct],
                                  attrs=attrs, kernel_name=kernel_name, dump_code=True, tuning=t)
        if t:
            anchor_samples, dloss, expect, output, prediction, prediction_, target, target_ = gen_data(
                anchor_sample_correct, anchor_samples_dtype, dtype, sample_shape, shape, sigma)
            return mod, expect, (dloss, prediction, target, anchor_samples, output)
        else:
            return mod
    else:
        anchor_samples, dloss, expect, output, prediction, prediction_, target, target_ = gen_data(
            anchor_sample_correct, anchor_samples_dtype, dtype, sample_shape, shape, sigma)
        mod = utils.op_build_test(smooth_l1_loss_grad.smooth_l1_loss_grad,
                                  [sample_shape, shape, shape, sample_shape],
                                  [dtype, dtype, dtype, anchor_samples_dtype], op_attrs=[sigma, anchor_sample_correct],
                                  attrs=attrs, kernel_name=kernel_name, dump_code=True)
        output = utils.mod_launch(mod, (dloss, prediction, target, anchor_samples, output), expect=expect)
        return (dloss, prediction, target, anchor_samples), output, expect, compare_tensor(output, expect, atol=5e-3,
                                                                                           rtol=5e-3)
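
# A minimal usage sketch (the shape, dtype and empty attrs are illustrative
# assumptions, not values from the real test configs): every runner here
# follows the same contract and returns
# (inputs, actual_output, expected_output, compare_ok).
def _smooth_l1_loss_grad_example():
    inputs, output, expect, ok = smooth_l1_loss_grad_run((8, 128, 4), "float16", attrs={})
    assert ok, "smooth_l1_loss_grad output does not match the numpy expectation"
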
def distinguish_between_pn_samples_run(shape, threshold, dtype, attrs=None):
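    # float32 thresholds are truncated to three decimal places below
    # (int(x * 1000) / 1000.0), presumably to keep the host-side expectation
    # stable under float32 rounding.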
    if attrs is None:
        attrs = {}
    if dtype == 'float32':
        threshold = int(threshold * 1000) / 1000.0

    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(
            distinguish_between_pn_samples.distinguish_between_pn_samples,
            [shape], [dtype],
            op_attrs=[threshold],
            kernel_name=kernel_name,
            attrs=attrs,
            tuning=t)
        if t:
            args, exp_output, input = gen_data(dtype, shape, threshold)
            return mod, exp_output, args
        else:
            return mod
    else:
        mod = utils.op_build_test(
            distinguish_between_pn_samples.distinguish_between_pn_samples,
            [shape], [dtype],
            op_attrs=[threshold],
            kernel_name="distinguish_between_pn_samples",
            attrs=attrs)
        args, exp_output, input = gen_data(dtype, shape, threshold)
        # run_testcase
        output = utils.mod_launch(mod, args, expect=exp_output)

        TestCase_Result = compare_tensor(output,
                                         exp_output,
                                         rtol=5e-03,
                                         equal_nan=True)
        return input, output, exp_output, TestCase_Result
def quantized_max_pool_run(shape, dtype1, shape_list, dtype2, ksize, strides,
                           padding, data_format, quant_algo,
                           scale_mode, scale_sqrt, attrs):
    """run function"""
    if not isinstance(shape_list, (list, tuple, type(None))):
        raise RuntimeError("shape_list should be a list, tuple or None!")
    op_attrs = [ksize, strides, padding, data_format,
                quant_algo, scale_mode, scale_sqrt]
    if shape_list is None:
        mod = utils.op_build_test(quantized_max_pool, [shape], [dtype1],
                                  op_attrs=[None] + op_attrs,
                                  kernel_name='quantized_maxpool', attrs=attrs)
    else:
        mod = utils.op_build_test(quantized_max_pool,
                                  [shape, shape_list], [dtype1, dtype2],
                                  op_attrs=op_attrs,
                                  kernel_name='quantized_maxpool', attrs=attrs)
    expect, inputs, out_buf = gen_data(shape, dtype1, shape_list, dtype2, ksize,
                                       strides, padding, data_format, quant_algo,
                                       scale_mode, scale_sqrt)
    output = utils.mod_launch(mod, (*inputs, *out_buf), expect=expect)
    rtol, atol = get_rtol_atol("quantized_maxpool", dtype1)
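    # Integer results are compared exactly; floating-point results use the
    # per-op tolerances looked up above.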
    if expect.dtype in ("int8", "uint8"):
        cmp_res = compare_int(output, expect)
    else:
        cmp_res = compare_tensor(output, expect, rtol=rtol, atol=atol)
    return inputs, output, expect, cmp_res
def insn_vec_binary_elemwise_run(shape, dtype, attrs):
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(
            insn_vec_binary_elemwise.insn_vec_binary_elemwise, [shape, shape],
            [dtype, dtype],
            kernel_name=kernel_name,
            attrs=attrs,
            tuning=t)
        if t:
            args, exp_output, inputs = gen_data(dtype, shape)
            return mod, exp_output, args
        else:
            return mod
    else:
        mod = utils.op_build_test(
            insn_vec_binary_elemwise.insn_vec_binary_elemwise, [shape, shape],
            [dtype, dtype],
            kernel_name='insn_vec_binary_elemwise',
            attrs=attrs)
        args, exp_output, inputs = gen_data(dtype, shape)
        acu_output = utils.mod_launch(mod, args, expect=exp_output)
        # compare result
        TestCase_Result = compare_tensor(acu_output,
                                         exp_output,
                                         rtol=5e-03,
                                         equal_nan=True)

        return inputs, acu_output, exp_output, TestCase_Result
def sigmoid_cross_entropy_with_logits_run(shape1, dtype1, shape2, dtype2,
                                          kernel_name, attrs):
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(sigmoid_cross_entropy_with_logits.
                                  sigmoid_cross_entropy_with_logits,
                                  [shape1, shape2], [dtype1, dtype2],
                                  kernel_name=kernel_name,
                                  attrs=attrs,
                                  tuning=t)
        if t:
            expect, labels, logits, output = gen_data(dtype1, dtype2, shape1,
                                                      shape2)
            return mod, expect, (labels, logits, output)
        else:
            return mod
    else:
        mod = utils.op_build_test(sigmoid_cross_entropy_with_logits.
                                  sigmoid_cross_entropy_with_logits,
                                  [shape1, shape2], [dtype1, dtype2],
                                  kernel_name=kernel_name,
                                  attrs=attrs)
        expect, labels, logits, output = gen_data(dtype1, dtype2, shape1,
                                                  shape2)
        output = utils.mod_launch(mod, (labels, logits, output), expect=expect)
        compare_res = compare_tensor(output,
                                     expect,
                                     rtol=5e-03,
                                     atol=5e-03,
                                     equal_nan=True)

        return (labels, logits), output, expect, compare_res
def matmul_execute(shape_x, shape_y, bias, left_format, right_format, out_format, adj_x, adj_y, dtype, out_dtype, kernel_name, attrs):
    '''
    There are four types of fractal format in the Davinci core: zZ, zN, nZ, nN.
    General matmul formats:
    left_trans: False, right_trans: False: zZ * nZ = zN
    left_trans: True,  right_trans: False: nN * nZ = zN
    left_trans: False, right_trans: True : zZ * zN = zN
    left_trans: True,  right_trans: True : nN * zN = zN

    Now we need to support: zN * nZ = zN.
    Use left_format to specify the left matrix data format,
    and right_format to specify the right matrix data format.
    '''
    batch_tuple, m, k, n = extract_dim(shape_x, shape_y, adj_x, adj_y)
    m = (m + 15) // 16 * 16
    n = (n + 15) // 16 * 16
    k = (k + 15) // 16 * 16
    shape_xx, shape_yy, bias_shape, out_shape, k = get_converted_shapes(m, n, k, batch_tuple, adj_x, adj_y, bias, left_format, right_format, out_format)
    mod = dynamic_matmul_compile(shape_x, shape_y, bias, left_format, right_format, out_format, adj_x, adj_y, dtype, out_dtype, kernel_name, attrs)
    # Generate data
    m_x, m_y, bench_mark, bias_data = matmul_data(batch_tuple, m, k, n, dtype, out_dtype, bias, adj_x, adj_y, left_format, right_format, out_format)

    # mod launch
    output = np.full(out_shape, np.nan, out_dtype)
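    # For the dynamic build, the bias-free launch below passes nine trailing
    # 1s (presumably placeholder runtime-dim arguments), and outputs=(2,)
    # marks position 2 (the output buffer) in the argument tuple.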
    if bias == 0:
        output = utils.mod_launch(mod, (m_x, m_y, output, 1, 1, 1, 1, 1, 1, 1, 1, 1), outputs=(2,), expect=bench_mark)
    elif bias == 1:
        output = utils.mod_launch(mod, (m_x, m_y, bias_data, output), expect=bench_mark)

    # compare result
    rtol, atol = get_rtol_atol("matmul", dtype)
    compare_result = compare_tensor(output, bench_mark, rtol=rtol, atol=atol, equal_nan=True)
    # compare_result = utils.result_compare(output, bench_mark, r_tol=5e-3)
    return (m_x, m_y), output, bench_mark, compare_result
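
# A small self-contained check of the ceil-to-16 alignment used in
# matmul_execute (Davinci fractal tiles are 16x16, so m, k and n are
# rounded up to the next multiple of 16):
def _align16(x):
    return (x + 15) // 16 * 16

assert _align16(1) == 16 and _align16(16) == 16 and _align16(17) == 32
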
def relu_grad_run(shape, dtype, attrs):
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(relu_grad.relu_grad, [shape, shape],
                                  [dtype, dtype],
                                  kernel_name=kernel_name,
                                  attrs=attrs,
                                  tuning=t)
        if t:
            dy, expect, input_np, output = gen_data(dtype, shape)
            return mod, expect, (input_np, dy, output)
        else:
            return mod
    else:
        mod = utils.op_build_test(relu_grad.relu_grad, [shape, shape],
                                  [dtype, dtype],
                                  kernel_name='relu_grad',
                                  attrs=attrs)
        dy, expect, input_np, output = gen_data(dtype, shape)
        output = utils.mod_launch(mod, (input_np, dy, output), expect=expect)
        rtol, atol = get_rtol_atol("relu_grad", dtype)
        return (input_np, dy), output, expect, compare_tensor(output,
                                                              expect,
                                                              rtol=rtol,
                                                              atol=atol,
                                                              equal_nan=True)
def iou_for_train_run(shape_tensor, shape_tensor1, dtype, kernel_name, attrs):
    # Create op
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(IOU_for_train.iou_for_train,
                                  [shape_tensor, shape_tensor1],
                                  [dtype, dtype],
                                  kernel_name=kernel_name,
                                  attrs=attrs,
                                  tuning=t)
        if t:
            anchor, expect, ground_truth, output = gen_output_data(
                dtype, shape_tensor, shape_tensor1)
            return mod, expect, (anchor, ground_truth, output)
        else:
            return mod
    else:
        mod = utils.op_build_test(IOU_for_train.iou_for_train,
                                  [shape_tensor, shape_tensor1],
                                  [dtype, dtype],
                                  kernel_name=kernel_name,
                                  attrs=attrs)
        anchor, expect, ground_truth, output = gen_output_data(
            dtype, shape_tensor, shape_tensor1)
        output = utils.mod_launch(mod, (anchor, ground_truth, output),
                                  expect=expect)

        source_code = mod.imported_modules[0].get_source()
        utils.create_code(kernel_name, "./", source_code)
        return (anchor, ground_truth), output, expect, compare_tensor(output,
                                                                      expect,
                                                                      rtol=5e-03,
                                                                      equal_nan=True)
def logprob_ad_run(shape, dtype, kernel_name="", attrs=None):
    expects, head, x, mean, scale, outputs = gen_data(dtype, shape)

    mod = utils.op_build_test(
        distr_normal_diag_logprob_ad.normal_diag_logprob_ad,
        [head.shape, x.shape, mean.shape, scale.shape],
        [dtype, dtype, dtype, dtype],
        kernel_name=kernel_name,
        op_attrs=None,
        attrs=attrs,
        log_cce=True,
        dump_code=True,
        polyhedral=True,
    )
    outputs = utils.mod_launch(mod, [head, x, mean, scale, *outputs],
                               outputs=tuple(range(-len(outputs), 0)),
                               expect=expects)
    outputs = list(outputs)
    result = True
    for i in range(len(outputs)):
        result &= compare_tensor(outputs[i],
                                 expects[i],
                                 rtol=5e-03,
                                 equal_nan=True)

    return (head, x, mean, scale), outputs, expects, result
def gelu_grad_execute(shape, dtype, attrs):
    np.random.seed(0)

    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = gelu_grad_compile(shape,
                                dtype,
                                attrs,
                                kernel_name=kernel_name,
                                tuning=t)
        if t:
            x, dy, bench_mark, output = gelu_grad_data(shape, dtype)
            return mod, bench_mark, (x, dy, output)
        else:
            return mod
    else:
        mod = gelu_grad_compile(shape, dtype, attrs)
        x, dy, bench_mark, output = gelu_grad_data(shape, dtype)
        output = utils.mod_launch(mod, (x, dy, output), expect=bench_mark)

        rtol, atol = get_rtol_atol("gelu_grad", dtype)
        compare_res = compare_tensor(output,
                                     bench_mark,
                                     rtol=rtol,
                                     atol=atol,
                                     equal_nan=False)

        return (x, dy), output, bench_mark, compare_res
def two2fractal_execute(dim_size, format, dtype, attrs):
    # Generate data
    shape = dim_size
    support_formats = ['zN', 'zZ', 'nZ']
    assert format in support_formats
    assert len(shape) >= 2 and len(shape) <= 4

    # mod launch
    op_attrs = [format]

    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = two2fractal_compile(shape, dtype, op_attrs, attrs, t)
        if t:
            bench_mark, input, output = gen_data(dtype, format, shape)
            return mod, bench_mark, (input, output)
        else:
            return mod
    else:
        mod = two2fractal_compile(shape, dtype, op_attrs, attrs)
        source_code = mod.imported_modules[0].get_source()
        bench_mark, input, output = gen_data(dtype, format, shape)
        output = utils.mod_launch(mod, (input, output), expect=bench_mark)

        # compare result
        rtol, atol = get_rtol_atol("tile", dtype)
        compare_result = compare_tensor(output,
                                        bench_mark,
                                        rtol=rtol,
                                        atol=atol,
                                        equal_nan=True)
        return input, output, bench_mark, compare_result
def div_execute(shape1, shape2, dtype, attrs=None):
    if attrs is None:
        attrs = {}
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = div_compile(shape1,
                          shape2,
                          dtype,
                          attrs,
                          kernel_name=kernel_name,
                          tuning=t)
        if t:
            expect, input1, input2, output = gen_data(dtype, shape1, shape2)
            return mod, expect, (input1, input2, output)
        else:
            return mod
    else:
        mod = div_compile(shape1, shape2, dtype, attrs)
        expect, input1, input2, output = gen_data(dtype, shape1, shape2)
        output = utils.mod_launch(mod, (input1, input2, output), expect=expect)
        rtol, atol = get_rtol_atol("div", dtype)
        return (input1, input2), output, expect, compare_tensor(output,
                                                                expect,
                                                                rtol=rtol,
                                                                atol=atol,
                                                                equal_nan=True)
def resize_bilinear_run(in_shape, out_shape, dtype, kernel_name, attrs):
    kernel_name = utils.gen_name_kernel(kernel_name, dtype, in_shape)

    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(resize_bilinear.resize_bilinear,
                                  input_shapes=[in_shape],
                                  input_types=[dtype],
                                  op_attrs=[out_shape],
                                  kernel_name=kernel_name,
                                  attrs=attrs,
                                  tuning=t)
        if t:
            expect, input, output = gen_data(dtype, in_shape, out_shape)
            return mod, expect, (input, output)
        else:
            return mod
    else:
        # Create op
        mod = utils.op_build_test(resize_bilinear.resize_bilinear,
                                  input_shapes=[in_shape],
                                  input_types=[dtype],
                                  op_attrs=[out_shape],
                                  kernel_name=kernel_name,
                                  attrs=attrs)

        expect, input, output = gen_data(dtype, in_shape, out_shape)
        output = utils.mod_launch(mod, (input, output), expect=expect)
        return input, output, expect, compare_tensor(output,
                                                     expect,
                                                     atol=5e-01,
                                                     rtol=5e-03,
                                                     equal_nan=True)
def fill_run(shape, value, dtype, attrs):
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(fill.fill, [], [],
                                  op_attrs=[shape, value, dtype],
                                  kernel_name=kernel_name,
                                  attrs=attrs,
                                  tuning=t)
        if t:
            expect, input, output = gen_data(dtype, shape, value)
            return mod, expect, (output, )
        else:
            return mod
    else:
        mod = utils.op_build_test(fill.fill, [], [],
                                  op_attrs=[shape, value, dtype],
                                  kernel_name='fill',
                                  attrs=attrs)
        expect, input, output = gen_data(dtype, shape, value)
        output = utils.mod_launch(mod, (output, ), expect=expect)
        return input, output, expect, compare_tensor(output,
                                                     expect,
                                                     rtol=5e-03,
                                                     equal_nan=True)
def reshape_execute(in_shape, out_shape, dtype, attrs):
    if attrs is None:
        attrs = {}
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = reshape_compile(in_shape,
                              out_shape,
                              dtype,
                              attrs,
                              kernel_name=kernel_name,
                              tuning=t)
        if t:
            expect, input, output = gen_data(dtype, in_shape, out_shape)
            return mod, expect, (input, output)
        else:
            return mod
    else:
        mod = reshape_compile(in_shape, out_shape, dtype, attrs)
        expect, input, output = gen_data(dtype, in_shape, out_shape)
        args = [input, output]
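        # For dynamic-shape builds the kernel also expects the runtime dims:
        # all but the last output dim, then every input dim, then the
        # computed block count.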
        if attrs.get("dynamic"):
            for index in range(len(out_shape) - 1):
                args.append(out_shape[index])
            for i in in_shape:
                args.append(i)
            block_dim = compute_blockdim(in_shape)
            args.append(block_dim)
        output = utils.mod_launch(mod, args, outputs=(1, ), expect=expect)
        rtol, atol = get_rtol_atol("reshape", dtype)
        return input, output, expect, compare_tensor(output,
                                                     expect,
                                                     rtol=rtol,
                                                     atol=atol,
                                                     equal_nan=True)
def roipool_run(shape, roibox, pooled_shape, dtype, attrs, cce_path="./"):
    mod, output_shape = roipool.roipool(shape,
                                        roibox,
                                        pooled_shape,
                                        dtype,
                                        attrs=attrs)
    input1 = random_gaussian(shape, miu=1, sigma=0.1)

    if (dtype == "int32"):
        input1 = input1.astype(np.int32)
    elif (dtype == "float16"):
        input1 = input1.astype(np.float16)
    elif (dtype == "float32"):
        input1 = input1.astype(np.float32)

    expect = roipool_expect(input1, shape, roibox, pooled_shape)

    # source_code = mod.imported_modules[0].get_source()
    # utils.create_cce(kernel_name, cce_path, source_code)

    output = np.full(output_shape, np.nan, dtype)
    output = utils.mod_launch(mod, (input1, output), expect=expect)

    return (input1, ), output, expect, compare_tensor(output,
                                                      expect,
                                                      rtol=5e-03,
                                                      equal_nan=True)
def fused_mean_mul_execute(shape1, shape2, dtype, axis, keepdims, kernel_name,
                           attrs):
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = fused_mean_mul_compile(shape1,
                                     shape2,
                                     dtype,
                                     axis,
                                     keepdims,
                                     kernel_name,
                                     attrs,
                                     tuning=t)
        if t:
            expect, input1, input2, output = gen_data(shape1, shape2, dtype,
                                                      axis, keepdims)
            return mod, expect, (input1, input2, output)
        else:
            return mod
    else:
        expect, input1, input2, output = gen_data(shape1, shape2, dtype, axis,
                                                  keepdims)
        mod = fused_mean_mul_compile(shape1, shape2, dtype, axis, keepdims,
                                     kernel_name, attrs)
        output = utils.mod_launch(mod, (input1, input2, output), expect=expect)
        return (input1, input2), output, expect, compare_tensor(output,
                                                                expect,
                                                                rtol=5e-03,
                                                                equal_nan=True)
def maxpool_run(shape, kernel, stride, pad, hybrid, dtype, attrs=None, polyhedral=True):
    if attrs.get("dynamic"):
        var_shape = []
        for i in range(len(shape)):
            if i == len(shape) - 1:
                var_shape.append(shape[i])
            else:
                var_shape.append(tvm.var("I" + str(i)))
        build_shape = var_shape
    else:
        build_shape = shape
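    # e.g. for a rank-5 shape the dynamic branch above yields
    # build_shape = [I0, I1, I2, I3, shape[4]]: every axis becomes a tvm.var
    # except the last, which stays static.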
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(maxpool.maxpool, [build_shape], [dtype], op_attrs=[kernel, stride, pad],
                                  kernel_name=kernel_name, attrs=attrs, tuning=t)
        if t:
            expect, input, out_shape, res = gen_data(dtype, kernel, pad, shape, stride)
            return mod, expect, {"args": (input, res), "outputs": (-1,), "tuning": False}
        else:
            return mod
    else:
        if polyhedral:
            if hybrid:
                mod = utils.op_build_test(maxpool.maxpool, [build_shape], [dtype], op_attrs=[kernel, stride, pad],
                                          kernel_name='maxpool', attrs=attrs)
            else:
                mod = utils.op_build_test(maxpool.old_maxpool, [build_shape], [dtype], op_attrs=[kernel, stride, pad],
                                          kernel_name='maxpool_old', attrs=attrs)
        else:
            mod = maxpool.maxpool_manual_schedule(build_shape, kernel, stride, pad, dtype, attrs=attrs, polyhedral=polyhedral)
        expect, input, out_shape, res = gen_data(dtype, kernel, pad, shape, stride)
        output = utils.mod_launch(mod, [input, res], expect=expect)
        rtol, atol = get_rtol_atol("maxpool", dtype)
        return input, output, expect, compare_tensor(output, expect, rtol=rtol, atol=atol, equal_nan=True)
def square_difference_run(shape1,
                          shape2,
                          dtype,
                          kernel_name,
                          attrs,
                          cce_path="./"):
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(square_difference.square_difference,
                                  input_shapes=[shape1, shape2],
                                  input_types=[dtype, dtype],
                                  kernel_name=kernel_name,
                                  attrs=attrs,
                                  tuning=t)
        if t:
            expect, input1, input2, output = gen_data(dtype, shape1, shape2)
            return mod, expect, (input1, input2, output)
        else:
            return mod
    else:
        mod = utils.op_build_test(square_difference.square_difference,
                                  input_shapes=[shape1, shape2],
                                  input_types=[dtype, dtype],
                                  kernel_name=kernel_name,
                                  attrs=attrs)
        expect, input1, input2, output = gen_data(dtype, shape1, shape2)
        source_code = mod.imported_modules[0].get_source()
        utils.create_code(kernel_name, cce_path, source_code)
        output = utils.mod_launch(mod, (input1, input2, output), expect=expect)
        return (input1, input2), output, expect, compare_tensor(output,
                                                                expect,
                                                                rtol=5e-03,
                                                                equal_nan=True)
def focal_loss_run(shape, p_dtype, t_dtype, gamma, kernel_name, attrs):
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(focal_loss.focal_loss, [shape, shape],
                                  [p_dtype, t_dtype],
                                  op_attrs=[gamma],
                                  kernel_name=kernel_name,
                                  attrs=attrs,
                                  tuning=t)
        if t:
            expect, pred, targ = gen_data(attrs, gamma, p_dtype, shape,
                                          t_dtype)
            output = np.full(expect.shape, 0.0, p_dtype)
            return mod, expect, (pred, targ, output)
        else:
            return mod
    else:
        mod = utils.op_build_test(focal_loss.focal_loss, [shape, shape],
                                  [p_dtype, t_dtype],
                                  op_attrs=[gamma],
                                  kernel_name=kernel_name,
                                  attrs=attrs)
        expect, pred, targ = gen_data(attrs, gamma, p_dtype, shape, t_dtype)
        output = np.full(expect.shape, 0.0, p_dtype)
        output = utils.mod_launch(mod, (pred, targ, output), expect=expect)

        return (pred, targ), output, expect, compare_tensor(output,
                                                            expect,
                                                            rtol=5e-2,
                                                            atol=1e-4)
def scatter_nd_ad_run(indices_shape, data_shape, output_shape, indices_dtype, dtype, attrs):
    kernel_name = utils.gen_name_kernel("scatter_nd_ad", dtype, data_shape)

    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(scatter_nd_ad, [output_shape, indices_shape, data_shape],
                                  [dtype, indices_dtype, dtype], op_attrs=[output_shape], kernel_name=kernel_name,
                                  attrs=attrs, tuning=t)
        if t:
            data_input, expect, head_input, indices_input, output = gen_data(data_shape, dtype, indices_dtype,
                                                                             indices_shape, output_shape)
            return mod, expect, (head_input, indices_input, data_input, output)
        else:
            return mod
    else:
        data_input, expect, head_input, indices_input, output = gen_data(data_shape, dtype, indices_dtype,
                                                                         indices_shape, output_shape)
        mod = utils.op_build_test(scatter_nd_ad, [output_shape, indices_shape, data_shape],
                                  [dtype, indices_dtype, dtype], op_attrs=[output_shape], kernel_name=kernel_name,
                                  attrs=attrs)
        output = utils.mod_launch(mod, (head_input, indices_input, data_input, output), expect=expect)

        return (head_input, indices_input, data_input), output, expect, compare_tensor(output, expect, rtol=5e-03,
                                                                                       equal_nan=True)
def sub_ad_run(ashape, bshape, dtype, kernel_name, attrs):
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        a, b, out = gen_input_data(ashape, bshape, dtype)
        mod = utils.op_build_test(sub_ad, [out.shape, ashape, bshape],
                                  [dtype, dtype, dtype],
                                  kernel_name=kernel_name,
                                  attrs=attrs,
                                  tuning=t)
        if t:
            expect, head_np, output = gen_data(dtype, out)
            return mod, expect, (head_np, a, b, output)
        else:
            return mod
    else:
        a, b, out = gen_input_data(ashape, bshape, dtype)
        expect, head_np, output = gen_data(dtype, out)
        mod = utils.op_build_test(sub_ad, [out.shape, ashape, bshape],
                                  [dtype, dtype, dtype],
                                  kernel_name=kernel_name,
                                  attrs=attrs)
        output = utils.mod_launch(mod, (head_np, a, b, output), expect=expect)
        return (head_np, a, b), output, expect, compare_tensor(output,
                                                               expect,
                                                               atol=0.1)
def triangle_execute(shape, const_value, lower, dtype, attrs):
    support_type = ['float16', 'float32']
    assert dtype in support_type
    assert len(shape) <= 2
    if attrs is None:
        attrs = {}

    attrs['enable_pre_poly_loop_partition'] = False
    attrs['enable_post_poly_loop_partition'] = False
    attrs['enable_convert_if'] = True
    attrs['enable_double_buffer'] = False

    output_shape = shape
    if len(shape) == 1:
        output_shape = [shape[0], shape[0]]

    input, bench_mark = gen_data(shape, output_shape, const_value, lower,
                                 dtype)

    op_attrs = [const_value, lower]
    mod = triangle_compile(shape, dtype, op_attrs, attrs)
    source_code = mod.imported_modules[0].get_source()

    output = np.full(output_shape, np.nan, dtype)
    output = utils.mod_launch(mod, (input, output), expect=bench_mark)

    # compare result
    compare_result = compare_tensor(output,
                                    bench_mark,
                                    rtol=5e-3,
                                    equal_nan=True)
    return input, output, bench_mark, compare_result
def discontinous_mov_run(shapes, dtype, attrs):
    # Result_Numpy
    shape1 = shapes[0]
    shape2 = shapes[1]
    op_attrs = [shape2]

    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(discontinous_mov.discontinous_mov, [shape1], [dtype], op_attrs,
                                  kernel_name=kernel_name, attrs=attrs, tuning=t)
        if t:
            args, exp_output, input = gen_data(dtype, shape1, shape2)
            return mod, exp_output, args
        else:
            return mod
    else:
        mod = utils.op_build_test(discontinous_mov.discontinous_mov, [shape1], [dtype], op_attrs,
                                  kernel_name='discontinous_mov', attrs=attrs)
        args, exp_output, input = gen_data(dtype, shape1, shape2)
        acu_output = utils.mod_launch(mod, args, expect=exp_output)

        # compare result
        TestCase_Result = compare_tensor(acu_output, exp_output, rtol=5e-03, equal_nan=True)
        return input, acu_output, exp_output, TestCase_Result
def log_ad_run(shape, dtype, kernel_name, attrs):
    input_shape = [shape, shape]
    input_dtype = [dtype, dtype]

    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(log_ad.log_ad,
                                  input_shape,
                                  input_dtype,
                                  kernel_name=kernel_name,
                                  attrs=attrs,
                                  tuning=t)
        if t:
            expect, head_np, input, output = gen_data(dtype, shape)
            return mod, expect, (head_np, input, output)
        else:
            return mod
    else:
        mod = utils.op_build_test(log_ad.log_ad,
                                  input_shape,
                                  input_dtype,
                                  kernel_name=kernel_name,
                                  attrs=attrs)
        expect, head_np, input, output = gen_data(dtype, shape)
        output = utils.mod_launch(mod, (head_np, input, output), expect=expect)
        rtol, atol = get_rtol_atol("log_ad", dtype)
        return input, output, expect, compare_tensor(output,
                                                     expect,
                                                     rtol=rtol,
                                                     atol=atol,
                                                     equal_nan=True)
def invert_permutation_run(shape, dtype, attrs):
    # check shapes
    vc_util.check_shape(shape)

    if dtype.lower() != "int32":
        raise RuntimeError(
            "indices_dtype only supports int32 while dtype is %s" % dtype)

    A = akg.tvm.placeholder(shape, dtype, name="A")
    op = invert_permutation.invert_permutation(A)
    s = akg.tvm.create_schedule(op.op)

    kernel_name = utils.gen_name_kernel("invert_permutation", dtype, shape)
    with akg.build_config(add_lower_pass=cce.debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s, [A, op],
                        "cce",
                        name=kernel_name,
                        attrs=attrs,
                        polyhedral=True)

    input_data = np.random.permutation(np.arange(shape[0])).astype(np.int32)
    expect = np.full([shape[0]], 0, np.int32)
    for i, e in enumerate(input_data):
        expect[e] = i

    output = np.full([shape[0]], 0, np.int32)
    output = utils.mod_launch(mod, (input_data, output), expect=expect)

    return (input_data, ), output, expect, compare_tensor(output,
                                                          expect,
                                                          rtol=5e-03,
                                                          equal_nan=True)
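
# Sketch of an equivalent reference computation: for a permutation p,
# numpy's argsort is its inverse (argsort(p)[p[i]] == i), which is exactly
# what the explicit loop above builds into expect.
def _invert_permutation_reference(perm):
    return np.argsort(perm).astype(np.int32)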
def rsqrt_grad_run(shape, dtype, kernel_name, attrs, cce_path="./"):
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(rsqrt_grad.rsqrt_grad, [shape, shape],
                                  [dtype, dtype],
                                  kernel_name=kernel_name,
                                  attrs=attrs,
                                  tuning=t)
        if t:
            expect, grad, input, output = gen_data(dtype, shape)
            return mod, expect, (input, grad, output)
        else:
            return mod
    else:
        expect, grad, input, output = gen_data(dtype, shape)
        mod = utils.op_build_test(rsqrt_grad.rsqrt_grad, [shape, shape],
                                  [dtype, dtype],
                                  kernel_name=kernel_name,
                                  attrs=attrs)
        output = utils.mod_launch(mod, (input, grad, output), expect=expect)

        return (input, grad), output, expect, compare_tensor(output,
                                                             expect,
                                                             rtol=5e-03,
                                                             equal_nan=True)
def fused_mul_unsortedsegmentsum_execute(shape1, shape2, ids_shape,
                                         num_segments, dtype, attrs):
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = fused_mul_unsortedsegmentsum_compile(shape1,
                                                   shape2,
                                                   ids_shape,
                                                   num_segments,
                                                   dtype,
                                                   attrs,
                                                   kernel_name=kernel_name,
                                                   tuning=t)
        if t:
            expect, input1, input2, output, segment_ids = gen_data(
                dtype, ids_shape, num_segments, shape1, shape2)
            return mod, expect, (input1, input2, segment_ids, output)
        else:
            return mod
    else:
        expect, input1, input2, output, segment_ids = gen_data(
            dtype, ids_shape, num_segments, shape1, shape2)
        mod = fused_mul_unsortedsegmentsum_compile(shape1, shape2, ids_shape,
                                                   num_segments, dtype, attrs)
        output = utils.mod_launch(mod, (input1, input2, segment_ids, output),
                                  expect=expect)

        return (input1, input2, segment_ids,
                num_segments), output, expect, compare_tensor(output,
                                                              expect,
                                                              rtol=5e-03,
                                                              equal_nan=True)
def dropout_execute(shape_tensor, keep_prob, dtype, kernel_name, attrs=None):
    # Create op
    if attrs is None:
        attrs = {}
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = dropout_compile(shape_tensor,
                              keep_prob,
                              dtype,
                              kernel_name,
                              attrs,
                              tuning=t)
        if t:
            expect, input, output, mask = gen_data(dtype, shape_tensor,
                                                   keep_prob)
            return mod, expect, (input, mask, output)
        else:
            return mod
    else:
        mod = dropout_compile(shape_tensor, keep_prob, dtype, kernel_name,
                              attrs)
        expect, input, output, mask = gen_data(dtype, shape_tensor, keep_prob)
        output = utils.mod_launch(mod, (input, mask, output), expect=expect)

        source_code = mod.imported_modules[0].get_source()
        utils.create_code(kernel_name, "./", source_code)

        rtol, atol = get_rtol_atol("dropout", dtype)
        return (input, mask), output, expect, compare_tensor(output,
                                                             expect,
                                                             rtol=rtol,
                                                             atol=atol,
                                                             equal_nan=True)
def batch_norm_run(shape, dtype, eps, kernel_name, attrs=None, polyhedral=True):
    if len(shape) == 5:
        (n, c, h, w, c0) = shape
    else:
        (n, c, h, w) = shape
        c0 = None

    support_list = {"float16": np.float16, "float32": np.float32}
    if attrs is None:
        attrs = {}
    op_attrs = [eps, polyhedral, attrs]

    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        np_beta, np_gamma, np_mean, np_var = get_input_data(c, c0, dtype, support_list)
        mod = utils.op_build_test(batch_norm.batch_norm,
                                  [shape, np_mean.shape, np_var.shape, np_gamma.shape, np_beta.shape],
                                  [dtype, dtype, dtype, dtype, dtype], op_attrs, kernel_name=kernel_name, attrs=attrs,
                                  polyhedral=polyhedral, tuning=t)
        if t:
            expect, np_data, output = gen_data(c, c0, dtype, eps, np_beta, np_gamma, np_mean, np_var, shape, support_list)
            return mod, expect, (np_data, np_mean, np_var, np_gamma, np_beta, output)
        else:
            return mod
    else:
        np_beta, np_gamma, np_mean, np_var = get_input_data(c, c0, dtype, support_list)
        mod = utils.op_build_test(batch_norm.batch_norm,
                                  [shape, np_mean.shape, np_var.shape, np_gamma.shape, np_beta.shape],
                                  [dtype, dtype, dtype, dtype, dtype], op_attrs, kernel_name=kernel_name, attrs=attrs,
                                  polyhedral=polyhedral)
        expect, np_data, output = gen_data(c, c0, dtype, eps, np_beta, np_gamma, np_mean, np_var, shape, support_list)
        output = utils.mod_launch(mod, (np_data, np_mean, np_var, np_gamma, np_beta, output), expect=expect)
        return (np_data, np_mean, np_var, np_gamma, np_beta), output, expect, compare_tensor(output, expect, atol=0.01)