Example #1
def gen_data(fm_shape, w_shape, pad, stride, dilation, bias, expect_file):

    conv_param = {'stride': stride, 'pad': pad, 'dilation': dilation}
    stride, pad, dilation = conv_param_prepare(conv_param)
    fm_shape, w_shape, out_shape = conv_shape_4d(fm_shape, w_shape, pad,
                                                 stride, dilation)
    IN, IC, IH, IW = fm_shape
    WN, WC, WH, WW = w_shape

    x = random_gaussian((IN, IC, IH, IW), miu=1, sigma=0.1).astype(np.float16)
    w = random_gaussian((WN, WC, WH, WW), miu=0.5,
                        sigma=0.01).astype(np.float16)

    if bias:
        b = random_gaussian((WN, ), miu=1, sigma=0.1).astype(np.float16)
    else:
        b = np.zeros(WN, dtype=np.float16)

    flag_w = os.environ.get("WRITE_TO_DISK", "No")
    if flag_w == "No" and os.path.exists(expect_file):
        # read expect from file
        out = np.fromfile(expect_file, np.float16).reshape(out_shape)
    else:
        #compute expect data:
        out = conv_forward_naive(x.astype(np.float32), w.astype(np.float32),
                                 b.astype(np.float32), conv_param)
        out = out.astype(np.float16)

    if flag_w == "Yes":
        # write expect to file (tofile writes raw binary, so open the file in binary mode)
        with open(expect_file, "wb") as file:
            out.tofile(file)

    return conv_tensor_4d_to_5d(x, w, b, out)
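conv_forward_naive above is an external helper; a minimal sketch of the direct NCHW convolution it is assumed to compute (stride and symmetric padding only, no dilation; names here are illustrative, not the project's actual helper):

def naive_conv2d_sketch(x, w, b, stride, pad):
    # x: (N, C, H, W), w: (OC, C, KH, KW), b: (OC,)
    N, C, H, W = x.shape
    OC, _, KH, KW = w.shape
    xp = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)))
    OH = (H + 2 * pad - KH) // stride + 1
    OW = (W + 2 * pad - KW) // stride + 1
    out = np.zeros((N, OC, OH, OW), dtype=np.float32)
    for n in range(N):
        for oc in range(OC):
            for i in range(OH):
                for j in range(OW):
                    hs, ws = i * stride, j * stride
                    out[n, oc, i, j] = np.sum(
                        xp[n, :, hs:hs + KH, ws:ws + KW] * w[oc]) + b[oc]
    return out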
Example #2
def gen_data(shape, dtype, lr, momentum, rho, epsilon):
    """Generates input, output and expect data."""
    var = random_gaussian(shape, miu=10, sigma=1.0).astype(dtype)
    ms = np.abs(random_gaussian(shape, miu=4, sigma=0.1).astype(dtype))
    mom = random_gaussian(shape, miu=3, sigma=0.3).astype(dtype)
    grad = random_gaussian(shape, miu=3, sigma=0.3).astype(dtype)
    lr = np.array([lr]).astype(dtype)
    momentum = np.array([momentum]).astype(dtype)
    rho = np.array([rho]).astype(dtype)

    inputs = [var, ms, mom, grad, lr, momentum, rho]

    # ms = rho * ms + (1-rho) * grad * grad
    # mom = momentum * mom + lr * grad / sqrt(ms + epsilon)
    # var = var - mom
    one = np.array([1.0]).astype(dtype)
    ms_1 = rho * ms
    ms_2 = (one - rho) * grad * grad
    ms_update = ms_1 + ms_2
    mom_1 = momentum * mom
    mom_2_1 = lr * grad
    mom_2_2 = one / np.sqrt(ms_update + epsilon)
    mom_3 = mom_2_1 * mom_2_2
    mom_update = mom_1 + mom_3
    var_update = var - mom_update

    expects = (var_update, var_update.astype("float16"), ms_update, mom_update)
    outputs = np.full(var_update.shape, np.nan, "float16")
    args = [*inputs, outputs]

    return inputs, expects, args
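A quick self-check that the staged arithmetic above matches the commented RMSProp update; the shape, dtype and hyper-parameters below are illustrative, assuming random_gaussian is importable from the same test utilities:

inputs, expects, args = gen_data((16, 16), "float32", lr=0.01, momentum=0.9, rho=0.9, epsilon=1e-6)
var, ms, mom, grad, lr, momentum, rho = inputs
ms_ref = rho * ms + (1.0 - rho) * grad * grad
mom_ref = momentum * mom + lr * grad / np.sqrt(ms_ref + 1e-6)
assert np.allclose(expects[2], ms_ref)
assert np.allclose(expects[3], mom_ref, rtol=1e-5)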
Example #3
def gen_data(dtype, shape1, shape2):

    def pow_data_process(x, y):
        # For pow, if a value of x is negative, the corresponding value of y
        # must be integer-valued (e.g. 1.0, -2.0, 3.0); otherwise np.power
        # returns NaN for real inputs.
        x_b = np.broadcast_to(x, np.broadcast(x, y).shape)
        x_b_neg_index = np.where(x_b < 0)
        if len(x_b_neg_index) > 0 and len(x_b_neg_index[0]) > 0:
            shape_len_diff = len(x_b.shape) - len(y.shape)
            y_int_index = list(x_b_neg_index[shape_len_diff:])
            for dim in range(len(y.shape)):
                if y.shape[dim] != x_b.shape[dim+shape_len_diff]:
                    if y.shape[dim] != 1:
                        raise ValueError("broadcast mismatch %s vs %s" % (y.shape[dim], x_b.shape[dim + shape_len_diff]))
                    y_int_index[dim] = np.array([0] * len(y_int_index[dim]))
            y_int_index = tuple(y_int_index)    
            y_int = y.astype(np.int32).astype(y.dtype)
            y[y_int_index] = y_int[y_int_index]
      
    input1 = random_gaussian(shape1, miu=1, sigma=0.1).astype(dtype)
    input2 = random_gaussian(shape2, miu=1, sigma=0.1).astype(dtype)
    pow_data_process(input1, input2)
    expect = np.power(input1, input2)
    output = np.full(expect.shape, np.nan, dtype)
    return expect, input1, input2, output
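Why pow_data_process rounds the exponents: NumPy's real-valued power returns NaN whenever a negative base is raised to a non-integer exponent, for example:

with np.errstate(invalid="ignore"):
    print(np.power(-2.0, 0.5))   # nan
    print(np.power(-2.0, 3.0))   # -8.0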
Example #4
def gen_data(dtype, shape, shape2):
    support_list = {"float16": np.float16, "float32": np.float32}
    input1 = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
    input2 = random_gaussian(shape2, miu=1, sigma=0.1).astype(support_list[dtype])
    expect = gen_expect(input1, input2)
    output = np.full(expect.shape, np.nan, dtype)
    return expect, input1, input2, output
Example #5
def gen_data(bs, m, n, k, shape_bias, trans_a, trans_b, dtype):
    shape_a, shape_b, shape_out = get_shape(bs, m, n, k, trans_a, trans_b)
    matrix_a = random_gaussian(shape_a, miu=0.5, sigma=0.01).astype(dtype)
    matrix_b = random_gaussian(shape_b, miu=0.5, sigma=0.01).astype(dtype)
    if len(shape_bias) > 0:
        matrix_bias = random_gaussian(shape_bias, miu=0.5,
                                      sigma=0.01).astype(dtype)
    else:
        matrix_bias = np.zeros(shape_bias, dtype=dtype)

    # cast to float32 for fast compute in numpy
    if dtype == "float16":
        matrix_a_for_np = matrix_a.astype(np.float32)
        matrix_b_for_np = matrix_b.astype(np.float32)
        matrix_bias_for_np = matrix_bias.astype(np.float32)
    else:
        matrix_a_for_np = matrix_a
        matrix_b_for_np = matrix_b
        matrix_bias_for_np = matrix_bias
    if trans_a and trans_b:
        res = np.matmul(np.swapaxes(matrix_a_for_np, -1, -2),
                        np.swapaxes(matrix_b_for_np, -1, -2))
    elif trans_a:
        res = np.matmul(np.swapaxes(matrix_a_for_np, -1, -2), matrix_b_for_np)
    elif trans_b:
        res = np.matmul(matrix_a_for_np, np.swapaxes(matrix_b_for_np, -1, -2))
    else:
        res = np.matmul(matrix_a_for_np, matrix_b_for_np)

    res = np.add(res, matrix_bias_for_np)

    if dtype == "float16":
        res = res.astype(dtype)
    return matrix_a, matrix_b, matrix_bias, res
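The trans_a/trans_b branches only swap the last two axes before np.matmul, which transposes every batch slice; a small illustrative check (sizes are arbitrary):

a = np.random.rand(2, 3, 4).astype(np.float32)   # (bs, m, k)
bt = np.random.rand(2, 5, 4).astype(np.float32)  # (bs, n, k): B stored transposed
res = np.matmul(a, np.swapaxes(bt, -1, -2))      # same as a @ b with b of shape (bs, k, n)
assert res.shape == (2, 3, 5)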
Example #6
def gen_data(in_shape, in_dtype, inter_dtype, layout, out_dtype):

    if layout == "NHWC":
        num_channel = in_shape[3]
    else:
        num_channel = in_shape[1]

    # slots 4 and 9 hold full feature maps in in_dtype; the remaining slots
    # are per-channel tensors in inter_dtype
    data = [None] * 10
    for i in (0, 1, 2, 3, 5, 6, 7, 8):
        data[i] = random_gaussian([num_channel], miu=1,
                                  sigma=0.1).astype(inter_dtype)
    data[4] = random_gaussian(in_shape, miu=1, sigma=0.1).astype(in_dtype)
    data[9] = random_gaussian(in_shape, miu=1, sigma=0.1).astype(in_dtype)

    expect = compute_expect(data, inter_dtype, layout, out_dtype)
    output = np.full(expect.shape, np.nan, out_dtype)

    return data, output, expect
Example #7
def gen_data(dtype, shape):
    """Generate data for testing the op"""
    y = random_gaussian(size=shape).astype(dtype)
    dy = random_gaussian(size=shape).astype(dtype)
    expect = _asinh_grad_compute(y, dy)
    output = np.full(expect.shape, np.nan, dtype)
    return expect, [y, dy], output
Example #8
def gen_data(shape, dtype):
    var = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    m = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    grad = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    lr = np.random.rand(1).astype(dtype)
    logbase = np.random.rand(1).astype(dtype)
    sign_decay = np.random.rand(1).astype(dtype)
    beta = np.random.rand(1).astype(dtype)

    inputs = [var, m, grad, lr, logbase, sign_decay, beta]

    if dtype == "float16":
        var, m, grad, lr, logbase, sign_decay, beta = [
            i.astype("float32") for i in inputs
        ]
    one = np.array([1]).astype(var.dtype)
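    # PowerSign-style update computed below:
    #   m_out   = beta * m + (1 - beta) * grad
    #   var_out = var - lr * exp(logbase * sign_decay * sign(grad) * sign(m_out)) * grad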
    m_out = m * beta + grad * (one - beta)
    var_out = var - lr * np.exp(
        logbase * sign_decay * np.sign(grad) * np.sign(m_out)) * grad

    if dtype == "float16":
        exp_output = (var_out.astype(dtype), m_out.astype(dtype))
    else:
        exp_output = (var_out, m_out)

    args = inputs

    return exp_output, inputs, args
Example #9
def get_input_data(dtype, kernel, pad, shape, stride, support_list):
    # Generate data for testing the op
    x = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
    y = avgpool_run.benchmark(x, kernel, stride, pad)
    dy = random_gaussian(y.shape, miu=1, sigma=0.1).astype(support_list[dtype])
    dy = np.abs(dy)
    return dy, x, y
Example #10
def gen_data(dtype, shape, w_shape):
    # input_data = -0.01 * np.ones(shape).astype(dtype)
    # dy = -0.01 * np.ones(shape).astype(dtype)
    input_data = random_gaussian(shape, miu=0,
                                 sigma=0.001).astype(dtype.lower())
    dy = random_gaussian(shape, miu=-0.01, sigma=0.001).astype(dtype.lower())
    # w_data = random_gaussian(w_shape, miu=0, sigma=1).astype(dtype.lower())
    # input_data = np.random.uniform(low=-1.0, high=1.0, size=shape).astype(dtype)
    # dy = np.random.uniform(low=-1.0, high=1.0, size=shape).astype(dtype)
    w_data = np.random.uniform(low=0, high=1.0, size=w_shape).astype(dtype)
    w_reshape = w_data.reshape(1, w_shape[0], 1, 1)
    w_broadcast = np.broadcast_to(w_reshape, shape)
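    # PReLU backward: d_input = dy where x >= 0 and dy * w where x < 0;
    # d_w accumulates dy * x over the positions where x < 0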
    expect_dA = dy * (input_data >= 0) + dy * (input_data < 0) * w_broadcast
    # expect_dA = (dy.astype("float32") * (input_data >= 0) + dy.astype("float32") * (input_data < 0) * w_broadcast.astype("float32")).astype(dtype.lower())
    dw_intermediate = dy * (input_data < 0) * input_data
    if w_shape[0] == 1:
        # expect_dw = np.sum(dw_intermediate, keepdims = True, dtype=dtype)
        expect_dw = (np.sum(dw_intermediate,
                            dtype="float32")).astype(dtype.lower())
        # expect_dw = np.sum(dw_intermediate, dtype=dtype)
        expect_dw = expect_dw.reshape(1)
    else:
        expect_dw = (np.sum(dw_intermediate, axis=(0, 2, 3),
                            dtype="float32")).astype(dtype.lower())
        # expect_dw = np.sum(dw_intermediate, axis=(0,2,3), dtype=dtype)
        # expect_dw = np.sum(dw_intermediate.astype("float32"), axis=(0,2,3))
        # expect_dw = expect_dw.astype(dtype)
    output_dA = np.full(expect_dA.shape, np.nan, dtype)
    output_dw = np.full(expect_dw.shape, np.nan, dtype)
    return dy, expect_dA, expect_dw, input_data, output_dA, output_dw, w_data
Example #11
def gen_data(shape, out_shape, dtype, out_dtype):
    support_list = {"float16": np.float16, "float32": np.float32}
    inshp_data = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
    outshp_data = random_gaussian(out_shape, miu=1, sigma=0.1).astype(support_list[out_dtype])
    output = np.full(out_shape, np.nan, out_dtype)
    expect = compute_expect(inshp_data, outshp_data)
    return inshp_data, outshp_data, output, expect
Example #12
def gen_data(shape, dtype, lr, momentum, rho, epsilon):
    var = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    grad = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    rho = np.array([rho]).astype(dtype)
    mg = grad * rho
    ms = grad * grad
    mom = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    lr = np.array([lr]).astype(dtype)
    momentum = np.array([momentum]).astype(dtype)
    inputs = [var, mg, ms, mom, grad, lr, momentum, rho]

    if dtype == "float16":
        var, mg, ms, mom, grad, lr, momentum, rho = [x.astype("float32") for x in inputs]

    one = np.array([1.0], dtype=rho.dtype)
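    # centered RMSProp update computed below:
    #   mg  <- rho * mg + (1 - rho) * grad
    #   ms  <- rho * ms + (1 - rho) * grad^2
    #   mom <- momentum * mom + lr * grad / sqrt(ms - mg^2 + epsilon)
    #   var <- var - mom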
    out_mg = rho * mg + (one - rho) * grad
    out_ms = rho * ms + (one - rho) * grad * grad
    out_mom = momentum * mom + lr * grad / np.sqrt(out_ms - out_mg * out_mg + epsilon)
    out_var = var - out_mom

    exp_output = (out_var, out_mg, out_ms, out_mom)
    if dtype != out_var.dtype:
        exp_output = tuple([x.astype(dtype) for x in exp_output])
    args = inputs

    return exp_output, inputs, args
Example #13
def gen_data(shape, dtype):
    support_list = {"float16": np.float16, "float32": np.float32}
    lhd = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
    rhd = random_gaussian(shape, miu=1, sigma=0.1).astype(support_list[dtype])
    expect = np.multiply(lhd, rhd)
    output = np.full(shape, np.nan, dtype)
    return lhd, rhd, output, expect
Example #14
def gen_data(dtype, reduction, shape):
    # support_list = {"float16": np.float16, "float32": np.float32}
    target = random_gaussian(shape, miu=0, sigma=1).astype(dtype)
    target = np.abs(target)
    prediction = random_gaussian(shape, miu=0, sigma=1).astype(dtype)
    prediction = np.abs(prediction)
    # off_set = np.full(prediction.shape, 0.05, dtype)
    off_set = np.full(prediction.shape, 2, dtype)
    prediction = np.add(prediction, off_set)
    target = np.add(target, off_set)
    pre_log = np.log(prediction).astype(dtype)
    tar_log = np.log(target).astype(dtype)
    sub_log = np.subtract(tar_log, pre_log)
    expect = np.multiply(target, sub_log).astype(dtype)
    if reduction == 'sum':
        expect = np.sum(expect)
    if reduction == 'mean':
        expect = np.mean(expect)
    if reduction == 'batchmean':
        reduce_axis = tuple(np.arange(1, len(shape)))
        expect = np.mean(expect, axis=reduce_axis, keepdims=False)
    if reduction == 'sum' or reduction == 'mean':
        out_shape = []
        out_shape.append(1)
        output = np.full(out_shape, 0, dtype)
    if reduction == 'batchmean':
        reduce_axis = tuple(np.arange(1, len(shape)))
        out_shape = get_reduce_out_shape(shape,
                                         axis=reduce_axis,
                                         keepdims=False)
        output = np.full(expect.shape, np.nan, dtype)
    if reduction == 'none':
        output = np.full(expect.shape, np.nan, dtype)
    return expect, output, prediction, target
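For reduction='none' the element-wise expect above is just target * log(target / prediction); a quick sanity check with an illustrative shape and dtype, assuming random_gaussian is importable:

expect, output, prediction, target = gen_data("float32", "none", (4, 8))
assert np.allclose(expect, target * np.log(target / prediction), rtol=1e-5)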
Example #15
def gen_data(bias_shape, dtype, input_shape, scale_shape):
    bias_data = None
    if dtype.lower() in ["float16", "float32", "int32"]:
        input_data = random_gaussian(input_shape, miu=1,
                                     sigma=50.0).astype(dtype.lower())
        scale_data = random_gaussian(scale_shape, miu=1,
                                     sigma=2.0).astype(dtype.lower())
        if len(bias_shape) > 0:
            bias_data = random_gaussian(bias_shape, miu=1,
                                        sigma=0.5).astype(dtype.lower())
    elif dtype.lower() == "int8":
        input_data = np.random.randint(-40, 40, size=input_shape, dtype="int8")
        scale_data = np.random.randint(-3, 3, size=scale_shape, dtype="int8")
        if len(bias_shape) > 0:
            bias_data = np.random.randint(-3, 3, size=bias_shape, dtype="int8")
    elif dtype.lower() == "uint8":
        input_data = np.random.randint(0, 40, size=input_shape, dtype="uint8")
        scale_data = np.random.randint(0, 5, size=scale_shape, dtype="uint8")
        if len(bias_shape) > 0:
            bias_data = np.random.randint(0,
                                          10,
                                          size=bias_shape,
                                          dtype="uint8")
    else:
        raise RuntimeError("not supported data type %s" % dtype)
    expect = input_data * scale_data
    if len(bias_shape) > 0:
        expect = expect + bias_data
    output = np.full(expect.shape, np.nan, dtype)
    return bias_data, expect, input_data, output, scale_data
Example #16
def realdiv_ad_run(ashape, bshape, dtype, kernel_name, attrs):
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        a = random_gaussian(ashape, miu=1, sigma=0.1).astype(dtype)
        b = random_gaussian(bshape, miu=1, sigma=0.1).astype(dtype)
        out = np.divide(a, b)
        mod = utils.op_build_test(realdiv_ad, [out.shape, ashape, bshape],
                                  [dtype, dtype, dtype],
                                  kernel_name=kernel_name,
                                  attrs=attrs,
                                  tuning=t)
        if t:
            expect, head_np, output = gen_data(b, dtype, out)
            return mod, expect, (head_np, a, b, output)
        else:
            return mod
    else:
        a = random_gaussian(ashape, miu=1, sigma=0.1).astype(dtype)
        b = random_gaussian(bshape, miu=1, sigma=0.1).astype(dtype)
        out = np.divide(a, b)

        expect, head_np, output = gen_data(b, dtype, out)
        mod = utils.op_build_test(realdiv_ad, [out.shape, ashape, bshape],
                                  [dtype, dtype, dtype],
                                  kernel_name=kernel_name,
                                  attrs=attrs)
        output = utils.mod_launch(mod, (head_np, a, b, output), expect=expect)
        return (head_np, a, b), output, expect, compare_tensor(output,
                                                               expect,
                                                               atol=0.1)
Example #17
def gen_data(shape_ref, shape_indices, dtype_ref, dtype_indices):
    ref = random_gaussian(shape_ref, miu=10, sigma=0.3).astype(dtype_ref)
    new_ref = ref.copy()

    # generate valid index
    indices = np.random.randint(low=0,
                                high=shape_ref[0],
                                size=shape_indices,
                                dtype=dtype_indices)

    # reshape to a 1D tensor to index
    all_shape = np.prod(shape_indices).astype(dtype_indices)
    new_indices = np.reshape(indices, (all_shape, ))

    # build the updates shape from the indices shape plus the trailing dims of ref
    updates_shape = shape_indices + shape_ref[1:]
    updates = random_gaussian(updates_shape, miu=3,
                              sigma=0.3).astype(dtype_ref)

    # build new_updates_shape from new_indices plus the trailing dims of ref, so updates align with new_indices
    new_updates_shape = new_indices.shape + shape_ref[1:]
    new_updates = np.reshape(updates, new_updates_shape)

    # get results by new_updates
    for i in range(new_indices.shape[0]):
        new_ref[new_indices[i], ] += new_updates[i, ]

    output = np.full(shape_ref, np.nan, dtype_ref)
    args = [ref, indices, updates, output]
    return args, new_ref, ref, indices, updates,
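The accumulation loop above is an unbuffered scatter-add over the first axis; NumPy's np.add.at does the same thing, as this tiny standalone sketch shows:

ref = np.zeros((5, 3), dtype=np.float32)
indices = np.array([1, 1, 4], dtype=np.int32)
updates = np.ones((3, 3), dtype=np.float32)

looped = ref.copy()
for i in range(indices.shape[0]):       # same loop shape as gen_data above
    looped[indices[i]] += updates[i]

scattered = ref.copy()
np.add.at(scattered, indices, updates)  # duplicate indices accumulate
assert np.array_equal(looped, scattered)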
Example #18
def get_input_data(c, c0, dtype, eps, shape, support_list):
    if c0 is None:
        chan_shape = (1, c, 1, 1)
    else:
        chan_shape = (1, c, 1, 1, c0)

    axes = (0, 2, 3)

    miu = 0.5
    sigma = 0.03
    np_data = random_gaussian(shape, miu=miu,
                              sigma=sigma).astype(support_list[dtype])
    np_mean = np.mean(np_data, axis=axes, keepdims=True).astype(dtype)
    np_var = np.var(np_data, axis=axes, keepdims=True).astype(dtype)

    np_gamma = random_gaussian(chan_shape, miu=miu,
                               sigma=sigma).astype(support_list[dtype])
    np_beta = random_gaussian(chan_shape, miu=miu,
                              sigma=sigma).astype(support_list[dtype])
    np_var = np.abs(np_var)

    mean_bc = np.broadcast_to(np_mean, shape)
    var_bc = np.broadcast_to(np_var, shape)
    gamma_bc = np.broadcast_to(np_gamma, shape)
    beta_bc = np.broadcast_to(np_beta, shape)
    rsqvar2 = (1.0 / np.sqrt(var_bc + eps)).astype(var_bc.dtype)

    rsqvar = np.exp(np.log(var_bc + eps, dtype="float32") * -0.5,
                    dtype="float32")
    normalize_data = (np_data - mean_bc) * rsqvar
    y = gamma_bc * normalize_data + beta_bc

    return gamma_bc, np_beta, np_data, np_gamma, np_mean, np_var, var_bc, normalize_data, y
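The rsqrt used for normalization above is computed as exp(-0.5 * log(var + eps)), which is mathematically the same as the 1 / sqrt(var + eps) that the unused rsqvar2 holds; a quick check on illustrative data:

v = np.abs(np.random.rand(8).astype(np.float32)) + 1e-3
eps = 1e-3
a = np.exp(np.log(v + eps, dtype="float32") * -0.5, dtype="float32")
b = (1.0 / np.sqrt(v + eps)).astype(np.float32)
assert np.allclose(a, b, rtol=1e-5)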
Example #19
def gen_data(data_shape, dtype, num_rois, pooled_size, rois_shape):
    inputs = random_gaussian(data_shape, miu=1, sigma=0.1).astype(dtype)
    # rois = np.array([[0.0,2.0,2.0,8.0,8.0],[0.0,4.0,4.0,12.0,12.0]]).astype(dtype)
    rois = np.random.uniform(0.0, 0.1, size=rois_shape)
    for x in range(num_rois):
        rois[x][0] = np.random.randint(data_shape[0], size=1)
        rois[x][1] = np.random.uniform(low=0.0, high=data_shape[3], size=1)
        rois[x][2] = np.random.uniform(low=0.0, high=data_shape[2], size=1)
        rois[x][3] = np.random.uniform(low=rois[x][1],
                                       high=data_shape[3],
                                       size=1)
        rois[x][4] = np.random.uniform(low=rois[x][2],
                                       high=data_shape[2],
                                       size=1)
    rois = rois.astype(dtype)
    pooled_size_h = 0.0
    pooled_size_w = 0.0
    if isinstance(pooled_size, int):
        pooled_size_h = pooled_size
        pooled_size_w = pooled_size
        e_shape = (num_rois, data_shape[1], pooled_size_h, pooled_size_w)
    else:
        pooled_size_h, pooled_size_w = pooled_size
        e_shape = (num_rois, data_shape[1], pooled_size_h, pooled_size_w)
    output = np.full(e_shape, 1.0, dtype)
    expect = random_gaussian(e_shape, miu=1, sigma=0.1)
    return e_shape, expect, inputs, output, pooled_size_h, pooled_size_w, rois
Example #20
def gen_data(dtype, shape, with_l2_shrinkage=False):
    """Generate data for testing the op"""

    # tensors
    var = random_gaussian(shape).astype(dtype)
    accum = np.abs(random_gaussian(shape).astype(dtype))
    linear = random_gaussian(shape).astype(dtype)
    grad = random_gaussian(shape).astype(dtype)
    tensors = [var, accum, linear, grad]

    # scalars
    scalar_shape = (1, )
    lr = np.random.random_sample(scalar_shape).astype(dtype)
    l1 = np.random.random_sample(scalar_shape).astype(dtype)
    l2 = np.random.random_sample(scalar_shape).astype(dtype)
    lr_power = np.array([0.5], dtype)
    if with_l2_shrinkage:
        l2_shrinkage = np.random.random_sample(scalar_shape).astype(dtype)
        scalars = [lr, l1, l2, l2_shrinkage, lr_power]
    else:
        scalars = [lr, l1, l2, lr_power]

    # expects
    expects = apply_ftrl_impl(tensors, scalars, with_l2_shrinkage)

    return expects, tensors, scalars
Example #21
def gen_data(shape, dtype, shape_origin, format, out_dtype):
    if format == 'zN':
        input = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
        n1, m1, m0, n0 = shape[-4:]
        new_shape = shape[:-4] + [m1 * m0, n1 * n0]
        transpose_axis = [1, 2, 0, 3]
    elif format == 'zZ':
        input = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
        m1, n1, m0, n0 = shape[-4:]
        new_shape = shape[:-4] + [m1 * m0, n1 * n0]
        transpose_axis = [0, 2, 1, 3]
    else:
        raise ValueError("unsupported format %s" % format)

    transpose_axis = [x + len(shape) - 4 for x in transpose_axis]
    transpose_axis = [i for i in range(len(shape) - 4)] + transpose_axis
    bench_mark = input.transpose(transpose_axis).reshape(new_shape)
    if new_shape != shape_origin:
        # crop the padded last two axes back to the original extent
        if len(shape_origin) == 2:
            bench_mark = bench_mark[:shape_origin[0], :shape_origin[1]].astype(out_dtype)
        elif len(shape_origin) == 3:
            bench_mark = bench_mark[:, :shape_origin[1], :shape_origin[2]].astype(out_dtype)
        elif len(shape_origin) == 4:
            bench_mark = bench_mark[:, :, :shape_origin[2], :shape_origin[3]].astype(out_dtype)
        new_shape = shape_origin
    output = np.full(new_shape, np.nan, out_dtype)
    return output, input, bench_mark
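How the zN reconstruction works on a tiny case: an (n1, m1, m0, n0) fractal tensor is permuted to (m1, m0, n1, n0) and flattened into the plain (m1*m0, n1*n0) matrix, which is then cropped to the original shape. The sizes below are illustrative:

n1, m1, m0, n0 = 2, 3, 16, 16
frac = np.arange(n1 * m1 * m0 * n0).reshape(n1, m1, m0, n0)
full = frac.transpose(1, 2, 0, 3).reshape(m1 * m0, n1 * n0)
assert full.shape == (48, 32)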
Example #22
def gen_data(begin_norm_axis, begin_params_axis, dtype, shape_x):
    input = random_gaussian(shape_x, miu=1, sigma=0.1).astype(dtype)
    gamma = random_gaussian(shape_x[begin_params_axis:], miu=1,
                            sigma=0.1).astype(dtype)
    beta = random_gaussian(shape_x[begin_params_axis:], miu=1,
                           sigma=0.1).astype(dtype)
    in_rank = len(shape_x)
    if begin_norm_axis < 0:
        norm_axis = begin_norm_axis + in_rank
    else:
        norm_axis = begin_norm_axis
    norm_axes = tuple(range(norm_axis, in_rank))
    mean = np.broadcast_to(np.mean(input, axis=norm_axes, keepdims=True),
                           shape_x)
    diff = input - mean
    square = np.square(diff)
    smean = np.broadcast_to(np.mean(square, axis=norm_axes, keepdims=True),
                            shape_x)
    meps = smean + 1e-5
    # sqrt = np.sqrt(meps)
    # rsqrt = 1.0 / sqrt
    logs = np.log(meps)
    mul = logs * (-0.5)
    rsqrt = np.exp(mul)
    out = diff * rsqrt
    bn = out * gamma + beta
    output = np.full(shape_x, np.nan, dtype)
    out_mean = np.full(shape_x, np.nan, dtype)
    out_variance = np.full(shape_x, np.nan, dtype)
    expect = (bn, mean, smean)
    return beta, expect, gamma, input, out_mean, out_variance, output
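The block above is ordinary layer normalization with the rsqrt written as exp(-0.5 * log(...)); a quick check against np.mean / np.var on small float32 data (shapes and axes below are illustrative, assuming random_gaussian is importable):

beta, expect, gamma, x, _, _, _ = gen_data(begin_norm_axis=-1, begin_params_axis=-1,
                                           dtype="float32", shape_x=(2, 8))
mean = np.mean(x, axis=-1, keepdims=True)
var = np.var(x, axis=-1, keepdims=True)
ref = (x - mean) / np.sqrt(var + 1e-5) * gamma + beta
assert np.allclose(expect[0], ref, rtol=1e-5)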
Example #23
def gen_data(dtype, shape):
    input1 = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    input2 = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    head_np = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
    expect = head_np*input2
    output = np.full(shape, np.nan, dtype)
    return expect, head_np, input1, input2, output
Example #24
def gen_data(shape, dtype, epsilon):
    """Generate data for testing the op."""
    var = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
    accum = np.abs(random_gaussian(shape, miu=1, sigma=0.3).astype(dtype))
    accum_update = np.abs(
        random_gaussian(shape, miu=1, sigma=0.3).astype(dtype))
    grad = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
    lr = np.random.rand(1).astype(dtype)
    rho = np.random.rand(1).astype(dtype)

    inputs = [var, accum, accum_update, grad, lr, rho]

    one = np.array([1]).astype(dtype)
    epsilon = np.array([epsilon]).astype(dtype)

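    # Adadelta update computed below:
    #   accum        <- rho * accum + (1 - rho) * grad^2
    #   update        = sqrt(accum_update + eps) / sqrt(accum + eps) * grad
    #   accum_update <- rho * accum_update + (1 - rho) * update^2
    #   var          <- var - lr * update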
    out_accum = rho * accum + (one - rho) * grad * grad
    update = np.sqrt(accum_update + epsilon) / np.sqrt(out_accum +
                                                       epsilon) * grad
    out_accum_update = rho * accum_update + (one - rho) * update * update
    out_var = var - update * lr

    expects = [out_var, out_accum, out_accum_update]
    args = inputs

    return inputs, expects, args
Example #25
def gen_data(shape1, dtype1, shape2, dtype2):
    """generate valid data for arctangent"""
    head = random_gaussian(shape1, miu=0, sigma=0.5).astype(dtype1)
    input_x = random_gaussian(shape2, miu=0, sigma=0.5).astype(dtype2)
    expect = np.divide(1, np.add(1, np.square(input_x))) * head
    out_buf = np.full(shape1, np.nan, dtype1)
    return expect, (head, input_x), out_buf
Example #26
def gen_data(shape1, dtype1, shape2, dtype2):
    """generate valid data for arctangent2"""
    input1 = random_gaussian(shape1, miu=0, sigma=0.5).astype(dtype1)
    input2 = random_gaussian(shape2, miu=0, sigma=0.5).astype(dtype2)
    expect = np.arctan2(input1, input2)
    out_buf = np.full(shape1, np.nan, dtype1)
    return expect, (input1, input2), out_buf
Example #27
def gen_data(shape, dtype):
    data1 = random_gaussian(shape, miu=3, sigma=0.1).astype(dtype)
    data2 = random_gaussian(shape, miu=3, sigma=0.1).astype(dtype)
    data3 = random_gaussian(shape, miu=3, sigma=0.1).astype(dtype)
    data4 = random_gaussian(shape, miu=3, sigma=0.1).astype(dtype)

    return [data1, data2, data3, data4]
Example #28
def gen_data(dtype, shape):
    input_np = random_gaussian(shape, miu=1, sigma=0.5).astype(dtype)
    head_np = random_gaussian(shape, miu=1, sigma=0.5).astype(dtype)
    softplus_grad = softplus_ad_benchmark(input_np)
    expect = softplus_grad * head_np
    output = np.full(expect.shape, np.nan, dtype)
    return expect, head_np, input_np, output
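softplus_ad_benchmark is an external helper; since d/dx softplus(x) = d/dx log(1 + e^x) is the sigmoid, a reference it is assumed to match would look like the sketch below (the function name is illustrative):

def softplus_grad_reference(x):
    # sigmoid(x), computed in float64 for stability
    return 1.0 / (1.0 + np.exp(-x.astype(np.float64)))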
Example #29
def gen_data(shape1, shape2, dtype):
    support_list = {"float16": np.float16, "float32": np.float32}
    lhs = random_gaussian(shape1, miu=1, sigma=0.1).astype(support_list[dtype])
    rhs = random_gaussian(shape2, miu=1, sigma=0.1).astype(support_list[dtype])
    expect = np.minimum(lhs, rhs)
    output = np.full(expect.shape, np.nan, expect.dtype)
    return lhs, rhs, output, expect
Example #30
def gen_data(dtype, shape, use_nesterov=False):
    """Generate data for testing the op"""

    # tensors
    var = random_gaussian(shape).astype(dtype)
    m = random_gaussian(shape).astype(dtype)
    v = np.abs(random_gaussian(shape).astype(dtype))
    grad = random_gaussian(shape).astype(dtype)
    tensors = [var, m, v, grad]

    # scalars
    lr = np.array([0.001], dtype)
    beta1 = np.array([0.9], dtype)
    beta2 = np.array([0.999], dtype)
    epsilon = np.array([1e-7], dtype)
    t = np.random.randint(1, 100, size=(1, ))
    beta1_power = np.power(beta1, t).astype(dtype)
    beta2_power = np.power(beta2, t).astype(dtype)
    scalars = [beta1_power, beta2_power, lr, beta1, beta2, epsilon]

    # expects
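    #   lr_t  = lr * sqrt(1 - beta2^t) / (1 - beta1^t)
    #   m_t   = m + (1 - beta1) * (grad - m)
    #   v_t   = v + (1 - beta2) * (grad^2 - v)
    #   var_t = var - lr_t * m_t / (sqrt(v_t) + epsilon)   (Nesterov variant adjusts the numerator)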
    lr_coefficient = np.sqrt(1.0 - beta2_power) / (1.0 - beta1_power)
    lr_t = lr * lr_coefficient
    m_t = m + (1.0 - beta1) * (grad - m)
    v_t = v + (1.0 - beta2) * (grad * grad - v)
    v_t_sqrt = np.sqrt(v_t)
    if use_nesterov:
        var_t = var - (lr_t * (m_t * beta1 +
                               (1.0 - beta1) * grad)) / (epsilon + v_t_sqrt)
    else:
        var_t = var - (lr_t * m_t) / (epsilon + v_t_sqrt)
    expects = [var_t, m_t, v_t]
    return expects, tensors, scalars