Example #1
def gen_data(dtype, shape, with_l2_shrinkage=False):
    """Generate data for testing the op"""

    # tensors
    var = random_gaussian(shape).astype(dtype)
    accum = np.abs(random_gaussian(shape).astype(dtype))
    linear = random_gaussian(shape).astype(dtype)
    grad = random_gaussian(shape).astype(dtype)
    tensors = [var, accum, linear, grad]

    # scalars
    scalar_shape = (1, )
    lr = np.random.random_sample(scalar_shape).astype(dtype)
    l1 = np.random.random_sample(scalar_shape).astype(dtype)
    l2 = np.random.random_sample(scalar_shape).astype(dtype)
    lr_power = np.array([0.5], dtype)
    if with_l2_shrinkage:
        l2_shrinkage = np.random.random_sample(scalar_shape).astype(dtype)
        scalars = [lr, l1, l2, l2_shrinkage, lr_power]
    else:
        scalars = [lr, l1, l2, lr_power]

    # expects
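    # apply_ftrl_impl is assumed to be the NumPy reference for the FTRL(-V2)
    # update; it returns the expected var / accum / linear values.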
    expects = apply_ftrl_impl(tensors, scalars, with_l2_shrinkage)

    return expects, tensors, scalars
Example #2
def add_ad_run(ashape, bshape, dtype, kernel_name="add", scale=1.0, attrs_op={}, polyhedral=True, attrs={}):
    # Shift arguments when scale was omitted and attrs/polyhedral were passed positionally.
    if not isinstance(scale, (float, int)):
        if not isinstance(attrs_op, bool):
            scale, attrs_op = 1.0, scale
        else:
            scale, attrs_op, polyhedral = 1.0, scale, attrs_op

    attrs.update(attrs_op)
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        a = random_gaussian(ashape, miu=1, sigma=0.1).astype(dtype)
        b = random_gaussian(bshape, miu=1, sigma=0.1).astype(dtype)
        out = np.add(a, b * scale)
        mod = utils.op_build_test(add_ad, [out.shape, ashape, bshape], [dtype, dtype, dtype], kernel_name=kernel_name,
                                  op_attrs=[scale], attrs=attrs, polyhedral=polyhedral, tuning=t)
        if t:
            expect, head_np, output = gen_data(dtype, out)
            return mod, expect, (head_np, a, b, output)
        else:
            return mod
    else:
        a = random_gaussian(ashape, miu=1, sigma=0.1).astype(dtype)
        b = random_gaussian(bshape, miu=1, sigma=0.1).astype(dtype)
        out = np.add(a, b * scale)
        mod = utils.op_build_test(add_ad, [out.shape, ashape, bshape], [dtype, dtype, dtype], kernel_name='add_ad',
                                  op_attrs=[scale], attrs=attrs, polyhedral=polyhedral)
        expect, head_np, output = gen_data(dtype, out)
        output = utils.mod_launch(mod, (head_np, a, b, output), expect=expect)
        return (head_np, a, b), output, expect, compare_tensor(output, expect, atol=0.1)
Example #3
def gen_data(shape1, dtype1, shape2, dtype2):
    """generate valid data for arctangent"""
    head = random_gaussian(shape1, miu=0, sigma=0.5).astype(dtype1)
    input_x = random_gaussian(shape2, miu=0, sigma=0.5).astype(dtype2)
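    # d/dx arctan(x) = 1 / (1 + x^2); the expected gradient scales this by the
    # incoming gradient head.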
    expect = np.divide(1, np.add(1, np.square(input_x))) * head
    out_buf = np.full(shape1, np.nan, dtype1)
    return expect, (head, input_x), out_buf
Example #4
def avgpool_ad_run(shape, kernel, stride, pad, dtype, polyhedral=False, attrs=None):
    support_list = {"float16": np.float16, "float32": np.float32}
    if attrs is None:
        attrs = {'loop_partition_unroll': True}
    else:
        attrs['loop_partition_unroll'] = True

    kernel_name = 'avgpool_ad'
    if polyhedral:
        avgpool = avgpool_ad
    else:
        kernel_name = kernel_name + "_manual_schedule"
        avgpool = avgpool_ad_no_custom_diff_manual_schedule

    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        input = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
        y = avgpool_run.benchmark(input, kernel, stride, pad)
        mod = utils.op_build_test(avgpool, [y.shape, shape], [dtype, dtype], op_attrs=[kernel, stride, pad],
                                  kernel_name=kernel_name, attrs=attrs, log_code=True, dump_code=True, tuning=t)
        if t:
            expect, head, output = gen_data(dtype, input, kernel, pad, stride, support_list, y)
            return mod, expect, (head, input, output)
        else:
            return mod
    else:
        input = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
        y = avgpool_run.benchmark(input, kernel, stride, pad)
        mod = utils.op_build_test(avgpool, [y.shape, shape], [dtype, dtype], op_attrs=[kernel, stride, pad],
                                  kernel_name=kernel_name, attrs=attrs, log_code=True, dump_code=True)
        expect, head, output = gen_data(dtype, input, kernel, pad, stride, support_list, y)
        output = utils.mod_launch(mod, [head, input, output], expect=expect)

        return [head, input], output, expect, compare_tensor(output, expect, rtol=5e-03, atol=5e-03, equal_nan=True)
Example #5
def gen_data(shape_ref, shape_indices, dtype_ref, dtype_indices):
    ref = random_gaussian(shape_ref, miu=10, sigma=0.3).astype(dtype_ref)
    new_ref = ref.copy()

    # generate valid index
    indices = np.random.randint(low=0,
                                high=shape_ref[0],
                                size=shape_indices,
                                dtype=dtype_indices)

    # reshape to a 1D tensor to index
    all_shape = np.prod(shape_indices).astype(dtype_indices)
    new_indices = np.reshape(indices, (all_shape, ))

    # build the updates shape from the indices shape and the ref shape
    updates_shape = shape_indices + shape_ref[1:]
    updates = random_gaussian(updates_shape, miu=3,
                              sigma=0.3).astype(dtype_ref)

    # combine the new_indices shape and the ref shape into new_updates_shape, so updates are applied based on new_indices
    new_updates_shape = new_indices.shape + shape_ref[1:]
    new_updates = np.reshape(updates, new_updates_shape)

    # get results by new_updates
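    # plain Python loop as the scatter-add reference: rows hit by repeated
    # indices simply accumulate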
    for i in range(new_indices.shape[0]):
        new_ref[new_indices[i], ] += new_updates[i, ]

    output = np.full(shape_ref, np.nan, dtype_ref)
    args = [ref, indices, updates, output]
    return args, new_ref, ref, indices, updates,
Example #6
def gen_data(shape, dtype):
    var = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    m = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    grad = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    lr = np.random.rand(1).astype(dtype)
    logbase = np.random.rand(1).astype(dtype)
    sign_decay = np.random.rand(1).astype(dtype)
    beta = np.random.rand(1).astype(dtype)

    inputs = [var, m, grad, lr, logbase, sign_decay, beta]

    if dtype == "float16":
        var, m, grad, lr, logbase, sign_decay, beta = [i.astype("float32") for i in inputs]
    one = np.array([1]).astype(var.dtype)
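    # PowerSign-style update, mirroring the two lines below:
    #   m_out   = beta * m + (1 - beta) * grad
    #   var_out = var - lr * exp(logbase * sign_decay * sign(grad) * sign(m_out)) * grad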
    m_out = m * beta + grad * (one - beta)
    var_out = var - lr * np.exp(logbase * sign_decay * np.sign(grad) * np.sign(m_out)) * grad

    if dtype == "float16":
        exp_output = (var_out.astype(dtype), m_out.astype(dtype))
    else:
        exp_output = (var_out, m_out)

    args = inputs

    return exp_output, inputs, args
Example #7
def gen_data(fm_shape, w_shape, pad, stride, dilation, bias, expect_file):

    conv_param = {'stride': stride, 'pad': pad, 'dilation': dilation}
    stride, pad, dilation = conv_param_prepare(conv_param)
    fm_shape, w_shape, out_shape = conv_shape_4d(fm_shape, w_shape, pad,
                                                 stride, dilation)
    IN, IC, IH, IW = fm_shape
    WN, WC, WH, WW = w_shape

    x = random_gaussian((IN, IC, IH, IW), miu=1, sigma=0.1).astype(np.float16)
    w = random_gaussian((WN, WC, WH, WW), miu=0.5,
                        sigma=0.01).astype(np.float16)

    if bias:
        b = random_gaussian((WN, ), miu=1, sigma=0.1).astype(np.float16)
    else:
        b = (np.array(np.zeros(WN))).astype(np.float16, copy=False)

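    # The expected output is cached on disk: read it back when WRITE_TO_DISK is
    # "No" and the file exists, otherwise recompute it with the naive reference
    # convolution (and write it out when WRITE_TO_DISK is "Yes").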
    flag_w = os.environ.get("WRITE_TO_DISK", "No")
    if flag_w == "No" and os.path.exists(expect_file):
        # read expect from file
        out = np.fromfile(expect_file, np.float16).reshape(out_shape)
    else:
        # compute expect data
        out = conv_forward_naive(x.astype(np.float32), w.astype(np.float32),
                                 b.astype(np.float32), conv_param)
        out = out.astype(np.float16)

    if flag_w == "Yes":
        # write expect to file
        with open(expect_file, "wb") as file:
            out.tofile(file)

    return conv_tensor_4d_to_5d(x, w, b, out)
Example #8
def gen_data(shape, dtype, lr, momentum, rho, epsilon):
    """Generates input, output and expect data."""
    var = random_gaussian(shape, miu=10, sigma=1.0).astype(dtype)
    ms = np.abs(random_gaussian(shape, miu=4, sigma=0.1).astype(dtype))
    mom = random_gaussian(shape, miu=3, sigma=0.3).astype(dtype)
    grad = random_gaussian(shape, miu=3, sigma=0.3).astype(dtype)
    lr = np.array([lr]).astype(dtype)
    momentum = np.array([momentum]).astype(dtype)
    rho = np.array([rho]).astype(dtype)

    inputs = [var, ms, mom, grad, lr, momentum, rho]

    # ms = rho * ms + (1-rho) * grad * grad
    # mom = momentum * mom + lr * grad / sqrt(ms + epsilon)
    # var = var - mom
    one = np.array([1.0]).astype(dtype)
    ms_1 = rho * ms
    ms_2 = (one - rho) * grad * grad
    ms_update = ms_1 + ms_2
    mom_1 = momentum * mom
    mom_2_1 = lr * grad
    mom_2_2 = one / np.sqrt(ms_update + epsilon)
    mom_3 = mom_2_1 * mom_2_2
    mom_update = mom_1 + mom_3
    var_update = var - mom_update

    expects = (var_update, var_update.astype("float16"), ms_update, mom_update)
    outputs = np.full(var_update.shape, np.nan, "float16")
    args = [*inputs, outputs]

    return inputs, expects, args
Example #9
def gen_data(shape1, shape2, shape3, shape4, data_type, indices_type, axis,
             keepdims, num):
    input1 = random_gaussian(shape1, miu=1, sigma=0.1).astype(data_type)
    out_dim1 = 1
    for i in range(len(shape2) - 1):
        out_dim1 = out_dim1 * shape2[i]
    input2 = np.zeros([shape2[-1], out_dim1]).astype(indices_type)
    for i in range(shape2[-1]):
        input2[i] = np.random.randint(low=0, high=shape1[i], size=out_dim1)
    input3 = random_gaussian(shape3, miu=1, sigma=0.1).astype(data_type)
    prod = np.sum(input1[tuple(input2.tolist())], axis=axis,
                  keepdims=keepdims) * input3

    input4 = np.random.randint(low=0, high=10,
                               size=shape4).astype(indices_type)
    input5 = np.random.randint(low=0, high=10,
                               size=shape4).astype(indices_type)
    expect1 = np.zeros((num, ) + shape3[len(shape4):]).astype(data_type)
    expect2 = np.zeros((num, ) + shape3[len(shape4):]).astype(data_type)
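    # np.add.at is an unbuffered scatter-add, so duplicate indices in
    # input4 / input5 accumulate correctly in the expected outputs.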
    np.add.at(expect1, input4, prod)
    np.add.at(expect2, input5, prod)

    input2 = input2.transpose()
    input2 = input2.reshape(shape2)
    return input1, input2, input3, input4, input5, expect1, expect2
Example #10
def get_input_data(dtype, kernel, pad, shape, stride, support_list):
    # Generate data for testing the op
    x = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
    y = avgpool_run.benchmark(x, kernel, stride, pad)
    dy = random_gaussian(y.shape, miu=1, sigma=0.1).astype(support_list[dtype])
    dy = np.abs(dy)
    return dy, x, y
Example #11
def gen_data(shape1, dtype1, shape2, dtype2):
    """generate valid data for arctangent2"""
    input1 = random_gaussian(shape1, miu=0, sigma=0.5).astype(dtype1)
    input2 = random_gaussian(shape2, miu=0, sigma=0.5).astype(dtype2)
    expect = np.arctan2(input1, input2)
    out_buf = np.full(shape1, np.nan, dtype1)
    return expect, (input1, input2), out_buf
Example #12
def gen_data(dtype, shape):
    """Generate data for testing the op"""
    y = random_gaussian(size=shape).astype(dtype)
    dy = random_gaussian(size=shape).astype(dtype)
    expect = _asinh_grad_compute(y, dy)
    output = np.full(expect.shape, np.nan, dtype)
    return expect, [y, dy], output
Example #13
def gen_data(dtype, shape, use_nesterov=False):
    """Generate data for testing the op"""

    # tensors
    var = random_gaussian(shape).astype(dtype)
    m = random_gaussian(shape).astype(dtype)
    v = np.abs(random_gaussian(shape).astype(dtype))
    grad = random_gaussian(shape).astype(dtype)
    tensors = [var, m, v, grad]

    # scalars
    lr = np.array([0.001], dtype)
    beta1 = np.array([0.9], dtype)
    beta2 = np.array([0.999], dtype)
    epsilon = np.array([1e-7], dtype)
    t = np.random.randint(1, 100, size=(1, ))
    beta1_power = (beta1 ** t).astype(dtype)  # keep scalars as shape-(1,) arrays
    beta2_power = (beta2 ** t).astype(dtype)
    scalars = [beta1_power, beta2_power, lr, beta1, beta2, epsilon]

    # expects
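    # Adam update as computed below:
    #   lr_t  = lr * sqrt(1 - beta2^t) / (1 - beta1^t)
    #   m_t   = m + (1 - beta1) * (grad - m)
    #   v_t   = v + (1 - beta2) * (grad^2 - v)
    #   var_t = var - lr_t * m_t / (epsilon + sqrt(v_t))   # or the Nesterov variant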
    lr_coefficient = np.sqrt(1.0 - beta2_power) / (1.0 - beta1_power)
    lr_t = lr * lr_coefficient
    m_t = m + (1.0 - beta1) * (grad - m)
    v_t = v + (1.0 - beta2) * (grad * grad - v)
    v_t_sqrt = np.sqrt(v_t)
    if use_nesterov:
        var_t = var - (lr_t * (m_t * beta1 +
                               (1.0 - beta1) * grad)) / (epsilon + v_t_sqrt)
    else:
        var_t = var - (lr_t * m_t) / (epsilon + v_t_sqrt)
    expects = [var_t, m_t, v_t]
    return expects, tensors, scalars
Example #14
def mul_ad_run(ashape, bshape, dtype, kernel_name, attrs):
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        a = random_gaussian(ashape, miu=1, sigma=0.1).astype(dtype)
        b = random_gaussian(bshape, miu=1, sigma=0.1).astype(dtype)
        out = np.multiply(a, b)
        mod = utils.op_build_test(mul_ad, [out.shape, ashape, bshape],
                                  [dtype, dtype, dtype],
                                  kernel_name=kernel_name,
                                  attrs=attrs,
                                  tuning=t)
        if t:
            expect, head_np, output = gen_data(b, dtype, out)
            return mod, expect, (head_np, a, b, output)
        else:
            return mod
    else:
        a = random_gaussian(ashape, miu=1, sigma=0.1).astype(dtype)
        b = random_gaussian(bshape, miu=1, sigma=0.1).astype(dtype)
        out = np.multiply(a, b)
        expect, head_np, output = gen_data(b, dtype, out)
        mod = utils.op_build_test(mul_ad, [out.shape, ashape, bshape],
                                  [dtype, dtype, dtype],
                                  kernel_name=kernel_name,
                                  attrs=attrs)
        output = utils.mod_launch(mod, (head_np, a, b, output), expect=expect)
        return (head_np, a, b), output, expect, compare_tensor(output,
                                                               expect,
                                                               atol=0.1)
Example #15
def gen_data(shape1, shape2, shape3, shape4, dtype1, dtype2, axis):
    # gather
    input1 = random_gaussian(shape1).astype(dtype1)
    input2 = np.random.randint(low=0, high=shape1[axis],
                               size=shape2).astype(dtype2)
    gather_out = np.take(input1, input2, axis=axis)

    # mul
    input3 = random_gaussian(shape3).astype(dtype1)
    mul_out = np.multiply(gather_out, input3)

    # scatter_add
    params = np.zeros(shape1, dtype1)
    #params = random_gaussian(shape1).astype(dtype1)
    indices = np.zeros(shape4, dtype2)
    original_shape = indices.shape
    indices = indices.reshape(-1, indices.shape[-1])
    for i in range(indices.shape[0]):
        for j in range(indices.shape[1]):
            indices[i][j] = np.random.randint(shape1[j], size=())

    indices = indices.reshape(original_shape)
    expect = deepcopy(params)
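    # np.add.at accumulates on duplicate index tuples, unlike plain fancy-index
    # assignment, which makes it a valid scatter_add reference.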
    np.add.at(expect, tuple(indices.T.tolist()), mul_out)
    indices = indices.reshape(shape4)
    return input1, input2, input3, indices, expect
Example #16
def gen_data(shape, dtype, shape_origin, format, out_dtype):
    if format == 'zN':
        input = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
        n1, m1, m0, n0 = shape[-4:]
        new_shape = shape[:-4] + [m1 * m0, n1 * n0]
        transpose_axis = [1, 2, 0, 3]
    elif format == 'zZ':
        input = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
        m1, n1, m0, n0 = shape[-4:]
        new_shape = shape[:-4] + [m1 * m0, n1 * n0]
        transpose_axis = [0, 2, 1, 3]
    else:
        raise ValueError("unsupported format: %s" % format)

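    # Both fractal layouts store the matrix as (m0, n0) blocks; the transpose
    # below reorders the block axes so the final reshape yields a plain
    # (..., m1 * m0, n1 * n0) matrix.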
    transpose_axis = [x + len(shape) - 4 for x in transpose_axis]
    transpose_axis = list(range(len(shape) - 4)) + transpose_axis
    bench_mark = input.transpose(transpose_axis).reshape(new_shape)
    if new_shape != shape_origin:
        if len(shape_origin) == 2:
            bench_mark = bench_mark[:shape_origin[0], :shape_origin[1]].astype(
                out_dtype)
        elif len(shape_origin) == 3:
            # crop the padded matrix back to the original (rows, cols)
            bench_mark = bench_mark[:, :shape_origin[1], :shape_origin[2]].astype(
                out_dtype)
        elif len(shape_origin) == 4:
            bench_mark = bench_mark[:, :, :shape_origin[2], :shape_origin[3]].astype(
                out_dtype)
        new_shape = shape_origin
    output = np.full(new_shape, np.nan, out_dtype)
    return output, input, bench_mark
Example #17
def gen_data(shape_matrix, shape_diagonal, dtype):
    """generate valid data to test"""
    input_matrix = random_gaussian(shape_matrix, miu=10,
                                   sigma=0.3).astype(dtype)
    input_diagonal = random_gaussian(shape_diagonal, miu=5,
                                     sigma=0.3).astype(dtype)
    # reshape shape_diagonal so it can broadcast against shape_matrix
    if shape_matrix[-2] <= shape_matrix[-1]:
        shape_b_newshape = list(shape_diagonal) + [1]
    # otherwise, extend the penultimate dimension of shape_diagonal for broadcasting
    else:
        shape_b_newshape = list(shape_diagonal)
        shape_b_newshape.insert(-1, 1)
    new_input_diagonal = np.reshape(input_diagonal, shape_b_newshape)
    # build a multi-dimensional 0/1 diagonal mask by broadcasting a 2D identity pattern
    input_help = np.zeros((shape_matrix[-2], shape_matrix[-1]))
    for i in range(min(shape_matrix[-2], shape_matrix[-1])):
        input_help[i, i] = 1.0
    input_help = np.broadcast_to(input_help, shape_matrix)
    input_help = input_help.astype(dtype)
    if dtype == 'uint8':
        new_help = np.abs(input_help.astype('float16') - 1).astype(dtype)
    else:
        new_help = np.abs(input_help - 1)

    exp_output = input_matrix * new_help + input_help * new_input_diagonal
    # inputs and output to hold the data
    output = np.full(shape_matrix, np.nan, dtype)
    args = [input_matrix, input_diagonal, input_help, output]
    return args, exp_output, input_matrix, input_diagonal, input_help
Example #18
def gen_data(bs, m, n, k, shape_bias, trans_a, trans_b, dtype):
    shape_a, shape_b, shape_out = get_shape(bs, m, n, k, trans_a, trans_b)
    matrix_a = random_gaussian(shape_a, miu=0.5, sigma=0.01).astype(dtype)
    matrix_b = random_gaussian(shape_b, miu=0.5, sigma=0.01).astype(dtype)
    if len(shape_bias) > 0:
        matrix_bias = random_gaussian(shape_bias, miu=0.5,
                                      sigma=0.01).astype(dtype)
    else:
        matrix_bias = np.zeros(shape_bias, dtype=dtype)

    # cast to float32 for fast compute in numpy
    if dtype == "float16":
        matrix_a_for_np = matrix_a.astype(np.float32)
        matrix_b_for_np = matrix_b.astype(np.float32)
        matrix_bias_for_np = matrix_bias.astype(np.float32)
    else:
        matrix_a_for_np = matrix_a
        matrix_b_for_np = matrix_b
        matrix_bias_for_np = matrix_bias
    if trans_a and trans_b:
        res = np.matmul(np.swapaxes(matrix_a_for_np, -1, -2),
                        np.swapaxes(matrix_b_for_np, -1, -2))
    elif trans_a:
        res = np.matmul(np.swapaxes(matrix_a_for_np, -1, -2), matrix_b_for_np)
    elif trans_b:
        res = np.matmul(matrix_a_for_np, np.swapaxes(matrix_b_for_np, -1, -2))
    else:
        res = np.matmul(matrix_a_for_np, matrix_b_for_np)

    res = np.add(res, matrix_bias_for_np)

    if dtype == "float16":
        res = res.astype(dtype)
    return matrix_a, matrix_b, matrix_bias, res
Example #19
def gen_data(in_shape, in_dtype, inter_dtype, layout, out_dtype):

    if layout == "NHWC":
        num_channel = in_shape[3]
    else:
        num_channel = in_shape[1]

    data = [np.nan] * 10
    data[0] = random_gaussian([num_channel], miu=1,
                              sigma=0.1).astype(inter_dtype)
    data[1] = random_gaussian([num_channel], miu=1,
                              sigma=0.1).astype(inter_dtype)
    data[2] = random_gaussian([num_channel], miu=1,
                              sigma=0.1).astype(inter_dtype)
    data[3] = random_gaussian([num_channel], miu=1,
                              sigma=0.1).astype(inter_dtype)
    data[4] = random_gaussian(in_shape, miu=1, sigma=0.1).astype(in_dtype)
    data[5] = random_gaussian([num_channel], miu=1,
                              sigma=0.1).astype(inter_dtype)
    data[6] = random_gaussian([num_channel], miu=1,
                              sigma=0.1).astype(inter_dtype)
    data[7] = random_gaussian([num_channel], miu=1,
                              sigma=0.1).astype(inter_dtype)
    data[8] = random_gaussian([num_channel], miu=1,
                              sigma=0.1).astype(inter_dtype)
    data[9] = random_gaussian(in_shape, miu=1, sigma=0.1).astype(in_dtype)

    expect = compute_expect(data, inter_dtype, layout, out_dtype)
    output = np.full(expect.shape, np.nan, out_dtype)

    return data, output, expect
Example #20
def gen_data(begin_norm_axis, begin_params_axis, dtype, shape_x):
    input = random_gaussian(shape_x, miu=1, sigma=0.1).astype(dtype)
    gamma = random_gaussian(shape_x[begin_params_axis:], miu=1, sigma=0.1).astype(dtype)
    beta = random_gaussian(shape_x[begin_params_axis:], miu=1, sigma=0.1).astype(dtype)
    in_rank = len(shape_x)
    if begin_norm_axis < 0:
        norm_axis = begin_norm_axis + in_rank
    else:
        norm_axis = begin_norm_axis
    norm_axes = tuple(range(norm_axis, in_rank))
    mean = np.broadcast_to(np.mean(input, axis=norm_axes, keepdims=True), shape_x)
    diff = input - mean
    square = np.square(diff)
    smean = np.broadcast_to(np.mean(square, axis=norm_axes, keepdims=True), shape_x)
    meps = smean + 1e-5
    # sqrt = np.sqrt(meps)
    # rsqrt = 1.0 / sqrt
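    # 1 / sqrt(x) is evaluated as exp(-0.5 * log(x)), presumably to mirror the
    # exp/log-based rsqrt used by the kernel rather than a direct square root.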
    logs = np.log(meps)
    mul = logs * (-0.5)
    rsqrt = np.exp(mul)
    out = diff * rsqrt
    bn = out * gamma + beta
    output = np.full(shape_x, np.nan, dtype)
    out_mean = np.full(shape_x, np.nan, dtype)
    out_variance = np.full(shape_x, np.nan, dtype)
    expect = (bn, mean, smean)
    return beta, expect, gamma, input, out_mean, out_variance, output
Example #21
def gen_data(dtype, shape):
    input1 = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    input2 = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    head_np = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    expect = head_np * input2
    output = np.full(shape, np.nan, dtype)
    return expect, head_np, input1, input2, output
Example #22
def gen_data(data_shape, dtype, num_rois, pooled_size, rois_shape):
    inputs = random_gaussian(data_shape, miu=1, sigma=0.1).astype(dtype)
    # rois = np.array([[0.0,2.0,2.0,8.0,8.0],[0.0,4.0,4.0,12.0,12.0]]).astype(dtype)
    rois = np.random.uniform(0.0, 0.1, size=rois_shape)
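    # Each ROI row appears to be [batch_index, x1, y1, x2, y2]; the loop below
    # draws the second corner no smaller than the first, so every box is valid.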
    for x in range(num_rois):
        rois[x][0] = np.random.randint(data_shape[0], size=1)
        rois[x][1] = np.random.uniform(low=0.0, high=data_shape[3], size=1)
        rois[x][2] = np.random.uniform(low=0.0, high=data_shape[2], size=1)
        rois[x][3] = np.random.uniform(low=rois[x][1],
                                       high=data_shape[3],
                                       size=1)
        rois[x][4] = np.random.uniform(low=rois[x][2],
                                       high=data_shape[2],
                                       size=1)
    rois = rois.astype(dtype)
    if isinstance(pooled_size, int):
        pooled_size_h = pooled_size
        pooled_size_w = pooled_size
    else:
        pooled_size_h, pooled_size_w = pooled_size
    e_shape = (num_rois, data_shape[1], pooled_size_h, pooled_size_w)
    output = np.full(e_shape, 1.0, dtype)
    expect = random_gaussian(e_shape, miu=1, sigma=0.1)
    return e_shape, expect, inputs, output, pooled_size_h, pooled_size_w, rois
Example #23
def matmul_ad_run(data_shape,
                  weight_shape,
                  dtype,
                  attrs_op={},
                  cce_path="./",
                  attrs={}):
    attrs.update(attrs_op)
    check_list = ["float16"]
    if dtype.lower() not in check_list:
        raise RuntimeError("matmul test only supports %s while dtype is %s" %
                           (",".join(check_list), dtype))

    mod = matmul_ad.matmul_ad(data_shape, weight_shape, dtype, attrs=attrs)
    input_data = random_gaussian(data_shape, miu=0.1, sigma=0.1)
    input_data = input_data.astype(np.float16)
    input_weight = random_gaussian(weight_shape, miu=0.1, sigma=0.1)
    input_weight = input_weight.astype(np.float16)
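    # For Y = X @ W, the gradient w.r.t. X is head @ W^T; the head fed to the
    # kernel below is X @ W itself, so the expected value is (X @ W) @ W^T.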
    expect = np.matmul(np.matmul(input_data, input_weight),
                       np.transpose(input_weight))

    output = np.full(data_shape, 1.0, dtype)
    output = utils.mod_launch(
        mod, (np.matmul(input_data, input_weight), input_weight, output),
        expect=expect)
    return (np.matmul(input_data, input_weight),
            input_weight), output, expect, compare_tensor(output,
                                                          expect,
                                                          atol=5e-01,
                                                          rtol=5e-03,
                                                          equal_nan=True)
Example #24
def gen_data(dtype, shape1, shape2):
    def pow_data_process(x, y):
        # For pow, if a value of x is negative, the corresponding value of y
        # must be an integer, e.g. 1.0, -2.0, 3.0.
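        # y is modified in place: wherever the broadcast x is negative, the
        # paired entries of y are truncated to whole numbers so np.power stays
        # real-valued.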
        x_b = np.broadcast_to(x, np.broadcast(x, y).shape)
        x_b_neg_index = np.where(x_b < 0)
        if len(x_b_neg_index) > 0 and len(x_b_neg_index[0]) > 0:
            shape_len_diff = len(x_b.shape) - len(y.shape)
            y_int_index = list(x_b_neg_index[shape_len_diff:])
            for dim in range(len(y.shape)):
                if y.shape[dim] != x_b.shape[dim + shape_len_diff]:
                    if y.shape[dim] != 1:
                        raise ValueError("broadcast mismatch %s vs %s" %
                                         (y.shape[dim], x_b.shape[dim + shape_len_diff]))
                    y_int_index[dim] = np.array([0] * len(y_int_index[dim]))
            y_int_index = tuple(y_int_index)
            y_int = y.astype(np.int32).astype(y.dtype)
            y[y_int_index] = y_int[y_int_index]

    input1 = random_gaussian(shape1, miu=1, sigma=0.1).astype(dtype)
    input2 = random_gaussian(shape2, miu=1, sigma=0.1).astype(dtype)
    pow_data_process(input1, input2)
    expect = np.power(input1, input2)
    output = np.full(expect.shape, np.nan, dtype)
    return expect, input1, input2, output
Example #25
def gen_data(shape, dtype, lr, momentum, rho, epsilon):
    var = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    grad = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    rho = np.array([rho]).astype(dtype)
    mg = grad * rho
    ms = grad * grad
    mom = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    lr = np.array([lr]).astype(dtype)
    momentum = np.array([momentum]).astype(dtype)
    inputs = [var, mg, ms, mom, grad, lr, momentum, rho]

    if dtype == "float16":
        var, mg, ms, mom, grad, lr, momentum, rho = [
            x.astype("float32") for x in inputs
        ]

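    # Centered RMSProp update, mirroring the lines below:
    #   mg  = rho * mg + (1 - rho) * grad
    #   ms  = rho * ms + (1 - rho) * grad^2
    #   mom = momentum * mom + lr * grad / sqrt(ms - mg^2 + epsilon)
    #   var = var - mom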
    one = np.array([1.0], dtype=rho.dtype)
    out_mg = rho * mg + (one - rho) * grad
    out_ms = rho * ms + (one - rho) * grad * grad
    out_mom = momentum * mom + lr * grad / np.sqrt(out_ms - out_mg * out_mg +
                                                   epsilon)
    out_var = var - out_mom

    exp_output = (out_var, out_mg, out_ms, out_mom)
    if dtype != out_var.dtype:
        exp_output = tuple([x.astype(dtype) for x in exp_output])
    args = inputs

    return exp_output, inputs, args
Example #26
def gen_data(dtype, shape):
    input_np = random_gaussian(shape, miu=1, sigma=0.5).astype(dtype)
    head_np = random_gaussian(shape, miu=1, sigma=0.5).astype(dtype)
    softplus_grad = softplus_ad_benchmark(input_np)
    expect = softplus_grad * head_np
    output = np.full(expect.shape, np.nan, dtype)
    return expect, head_np, input_np, output
Example #27
def gen_data(bias_shape, dtype, input_shape, scale_shape):
    bias_data = None
    if dtype.lower() in ["float16", "float32", "int32"]:
        input_data = random_gaussian(input_shape, miu=1,
                                     sigma=50.0).astype(dtype.lower())
        scale_data = random_gaussian(scale_shape, miu=1,
                                     sigma=2.0).astype(dtype.lower())
        if len(bias_shape) > 0:
            bias_data = random_gaussian(bias_shape, miu=1,
                                        sigma=0.5).astype(dtype.lower())
    elif dtype.lower() == "int8":
        input_data = np.random.randint(-40, 40, size=input_shape, dtype="int8")
        scale_data = np.random.randint(-3, 3, size=scale_shape, dtype="int8")
        if len(bias_shape) > 0:
            bias_data = np.random.randint(-3, 3, size=bias_shape, dtype="int8")
    elif dtype.lower() == "uint8":
        input_data = np.random.randint(0, 40, size=input_shape, dtype="uint8")
        scale_data = np.random.randint(0, 5, size=scale_shape, dtype="uint8")
        if len(bias_shape) > 0:
            bias_data = np.random.randint(0,
                                          10,
                                          size=bias_shape,
                                          dtype="uint8")
    else:
        raise RuntimeError("not supported data type %s" % dtype)
    expect = input_data * scale_data
    if len(bias_shape) > 0:
        expect = expect + bias_data
    output = np.full(expect.shape, np.nan, dtype)
    return bias_data, expect, input_data, output, scale_data
Example #28
def gen_data(dtype, reduction, shape):
    # support_list = {"float16": np.float16, "float32": np.float32}
    target = random_gaussian(shape, miu=0, sigma=1).astype(dtype)
    target = np.abs(target)
    prediction = random_gaussian(shape, miu=0, sigma=1).astype(dtype)
    prediction = np.abs(prediction)
    # off_set = np.full(prediction.shape, 0.05, dtype)
    off_set = np.full(prediction.shape, 2, dtype)
    prediction = np.add(prediction, off_set)
    target = np.add(target, off_set)
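    # Pointwise KL-divergence term: target * (log(target) - log(prediction));
    # the reduction mode below decides how it is aggregated.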
    pre_log = np.log(prediction).astype(dtype)
    tar_log = np.log(target).astype(dtype)
    sub_log = np.subtract(tar_log, pre_log)
    expect = np.multiply(target, sub_log).astype(dtype)
    if reduction == 'sum':
        expect = np.sum(expect)
    if reduction == 'mean':
        expect = np.mean(expect)
    if reduction == 'batchmean':
        reduce_axis = tuple(np.arange(1, len(shape)))
        expect = np.mean(expect, axis=reduce_axis, keepdims=False)
    if reduction == 'sum' or reduction == 'mean':
        output = np.full((1, ), 0, dtype)
    if reduction == 'batchmean':
        output = np.full(expect.shape, np.nan, dtype)
    if reduction == 'none':
        output = np.full(expect.shape, np.nan, dtype)
    return expect, output, prediction, target
Example #29
def gen_data(shape, dtype):
    data1 = random_gaussian(shape, miu=3, sigma=0.1).astype(dtype)
    data2 = random_gaussian(shape, miu=3, sigma=0.1).astype(dtype)
    data3 = random_gaussian(shape, miu=3, sigma=0.1).astype(dtype)
    data4 = random_gaussian(shape, miu=3, sigma=0.1).astype(dtype)

    return [data1, data2, data3, data4]
Example #30
def gen_data(dtype, shape, w_shape):
    # input_data = -0.01 * np.ones(shape).astype(dtype)
    # dy = -0.01 * np.ones(shape).astype(dtype)
    input_data = random_gaussian(shape, miu=0,
                                 sigma=0.001).astype(dtype.lower())
    dy = random_gaussian(shape, miu=-0.01, sigma=0.001).astype(dtype.lower())
    # w_data = random_gaussian(w_shape, miu=0, sigma=1).astype(dtype.lower())
    # input_data = np.random.uniform(low=-1.0, high=1.0, size=shape).astype(dtype)
    # dy = np.random.uniform(low=-1.0, high=1.0, size=shape).astype(dtype)
    w_data = np.random.uniform(low=0, high=1.0, size=w_shape).astype(dtype)
    w_reshape = w_data.reshape(1, w_shape[0], 1, 1)
    w_broadcast = np.broadcast_to(w_reshape, shape)
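    # PReLU backward: dA = dy where x >= 0 and dy * w where x < 0;
    # dw sums dy * x over the positions where x < 0 (per channel unless w_shape
    # is a single shared weight).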
    expect_dA = dy * (input_data >= 0) + dy * (input_data < 0) * w_broadcast
    # expect_dA = (dy.astype("float32") * (input_data >= 0) + dy.astype("float32") * (input_data < 0) * w_broadcast.astype("float32")).astype(dtype.lower())
    dw_intermediate = dy * (input_data < 0) * input_data
    if w_shape[0] == 1:
        # expect_dw = np.sum(dw_intermediate, keepdims = True, dtype=dtype)
        expect_dw = (np.sum(dw_intermediate,
                            dtype="float32")).astype(dtype.lower())
        # expect_dw = np.sum(dw_intermediate, dtype=dtype)
        expect_dw = expect_dw.reshape(1)
    else:
        expect_dw = (np.sum(dw_intermediate, axis=(0, 2, 3),
                            dtype="float32")).astype(dtype.lower())
        # expect_dw = np.sum(dw_intermediate, axis=(0,2,3), dtype=dtype)
        # expect_dw = np.sum(dw_intermediate.astype("float32"), axis=(0,2,3))
        # expect_dw = expect_dw.astype(dtype)
    output_dA = np.full(expect_dA.shape, np.nan, dtype)
    output_dw = np.full(expect_dw.shape, np.nan, dtype)
    return dy, expect_dA, expect_dw, input_data, output_dA, output_dw, w_data