Example #1
def test_quint4_typecvt():
    device = "xpux"
    shape = (3, 3, 3)
    data = np.random.random(shape).astype(np.float32) * 5 - 1

    def typecvt(x, dt=None):
        (y,) = G.apply_normal_varnode(ops.TypeCvt(dtype=dt), x)
        return y

    # convert to quint4
    dtype = quint4(0.01, 5)
    oup = _get_compiled_result(data,
                               np.float32,
                               shape,
                               device,
                               calc_func=partial(typecvt, dt=dtype))
    _check_result_attr(oup, dtype, "quint4")
    np.testing.assert_equal(oup, convert_to_quint4(data, dtype))

    # convert from quint4 to float32
    oup_float = _get_compiled_result(oup,
                                     dtype,
                                     shape,
                                     device,
                                     calc_func=partial(typecvt, dt=np.float32))
    assert oup_float.dtype == np.float32
    np.testing.assert_equal(
        oup_float, convert_from_quint4(convert_to_quint4(data, dtype)))
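
These snippets come from MegEngine's quantized-dtype test suite, so the surrounding imports are not shown; `_get_compiled_result` and `_check_result_attr` are helpers defined alongside them in the same test file. A minimal sketch of the imports Examples #1 and #2 assume (module paths hedged against MegEngine's public source tree, not an authoritative list):

# Assumed imports for Examples #1 and #2 (paths per MegEngine's test suite;
# treat this as a sketch).
from functools import partial

import numpy as np

import megengine.core.tensor.megbrain_graph as G
from megengine.core.ops import builtin as ops
from megengine.core.tensor.dtype import (
    convert_from_qint4,
    convert_from_quint4,
    convert_to_qint4,
    convert_to_quint4,
    qint4,
    quint4,
)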
Example #2
def test_dtype_int4_ffi_handle():
    device = "xpux"
    shape = (3, 3, 3)
    data = np.random.random(shape).astype(np.float32) * 5 - 1

    def identity(x):
        return x

    # round-trip quint4 data through a compiled identity graph
    dtype = quint4(0.01, 7)
    inp = convert_to_quint4(data, dtype)
    oup = _get_compiled_result(inp, dtype, shape, device, calc_func=identity)
    _check_result_attr(oup, dtype, "quint4")
    np.testing.assert_allclose(convert_from_quint4(oup), convert_from_quint4(inp))

    # repeat for the signed qint4 variant
    dtype = qint4(0.01)
    inp = convert_to_qint4(data, dtype)
    oup = _get_compiled_result(inp, dtype, shape, device, calc_func=identity)
    _check_result_attr(oup, dtype, "qint4", is_unsigned=False)
    np.testing.assert_allclose(convert_from_qint4(oup), convert_from_qint4(inp))
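
To make the round-trip assertions above concrete, here is a plain-NumPy sketch of the arithmetic that `convert_to_quint4`/`convert_from_quint4` are expected to perform for `quint4(scale, zero_point)`. The clamp bounds follow from the 4-bit unsigned representation; this is an illustration, not MegEngine's implementation:

import numpy as np

def to_quint4(x, scale, zero_point):
    # quantize: rescale, shift by the zero point, clamp to the
    # unsigned 4-bit range [0, 15]
    return np.clip(np.round(x / scale) + zero_point, 0, 15).astype(np.uint8)

def from_quint4(q, scale, zero_point):
    # dequantize: undo the zero-point shift and rescale to float
    return (q.astype(np.float32) - zero_point) * scale

x = np.array([-0.04, 0.0, 0.05], dtype=np.float32)
q = to_quint4(x, scale=0.01, zero_point=7)       # -> [3, 7, 12]
print(from_quint4(q, scale=0.01, zero_point=7))  # -> [-0.04  0.    0.05]

The signed qint4 variant works the same way with no zero point and a clamp range of [-8, 7].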
Example #3
    # inp_dtype, w_dtype, b_dtype, out_dtype and outp_scale come from the
    # enclosing test's scope; see the setup sketch after this example.
    def run(
        N,
        IC,
        OC,
        IH,
        IW,
        KH,
        KW,
        PH,
        PW,
        SH,
        SW,
        has_bias=True,
        nonlinear_mode="identity",
    ):
        inp_v = np.random.normal(size=(N, IC, IH, IW))
        w_v = np.random.normal(size=(OC, IC, KH, KW))
        b_v = np.random.normal(size=(1, OC, 1, 1))
        inp_scale = dtype.get_scale(inp_dtype)
        w_scale = dtype.get_scale(w_dtype)
        b_scale = dtype.get_scale(b_dtype)

        inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
        wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
        bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)

        inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
        w_int4 = mge.Parameter(wv, dtype=w_dtype)
        b_int32 = mge.Parameter(bv, dtype=b_dtype)

        inp_fp32 = inp_uint4.astype("float32")
        w_fp32 = w_int4.astype("float32")
        b_fp32 = b_int32.astype("float32")

        def run_conv2d(inp, w, b):
            # float32 reference path: plain conv2d plus optional ReLU
            out = F.conv2d(
                inp,
                w,
                b if has_bias else None,
                stride=(SH, SW),
                padding=(PH, PW),
            )
            if nonlinear_mode == "relu":
                return F.relu(out)
            return out

        def run_conv_bias(inp, w, b):
            # quantized path: fused conv + bias (+ activation) on int4 tensors
            b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
            return F.quantized.conv_bias_activation(
                inp,
                w,
                b,
                stride=(SH, SW),
                padding=(PH, PW),
                dtype=out_dtype,
                nonlinear_mode=nonlinear_mode,
            )

        # cast the float reference through out_dtype so both paths quantize
        # identically, then compare within one output quantization step
        expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
        expected = expected.astype(out_dtype).astype("float32")
        result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
        expected = F.flatten(expected)
        result = F.flatten(result)
        np.testing.assert_allclose(result.numpy(),
                                   expected.numpy(),
                                   atol=outp_scale)
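
The `run` helper closes over `inp_dtype`, `w_dtype`, `b_dtype`, `out_dtype`, and `outp_scale`, which this excerpt does not show. A hypothetical enclosing setup is sketched below; the concrete scale and zero-point values are illustrative, and the bias conventionally carries scale inp_scale * w_scale because the convolution accumulates in int32:

import megengine as mge
import megengine.functional as F
import megengine.core.tensor.dtype as dtype

inp_scale, w_scale = 0.01, 0.02
inp_dtype = dtype.quint4(inp_scale, 0)       # unsigned 4-bit input
w_dtype = dtype.qint4(w_scale)               # signed 4-bit weights
b_dtype = dtype.qint32(inp_scale * w_scale)  # int32 accumulator bias
out_dtype = dtype.quint4(0.1, 0)             # quantized output
outp_scale = dtype.get_scale(out_dtype)

# illustrative invocations: a padded 1x1 case with bias, and a strided
# case without bias
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, has_bias=False)

Comparing with atol=outp_scale tolerates a one-step rounding difference between the float reference (quantized after the fact) and the fused int4 kernel.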