# Imports assumed by this section; in the full test file they live at the top
# of the module.
import numpy as np
import pytest

import megengine as mge
import megengine.module as Float
import megengine.module.qat as QAT
import megengine.module.quantized as Q
from megengine import Parameter
from megengine.core.tensor import dtype
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.quantization import QuantMode, create_qparams, min_max_fakequant_qconfig
from megengine.quantization.observer import PassiveObserver
from megengine.quantization.quantize import (
    disable_fake_quant,
    disable_observer,
    propagate_qconfig,
)


# The parametrization is assumed: "abs"/"sin" exercise the unary branch below,
# the rest the binary branch. Note that SYMMERTIC is MegEngine's own spelling
# of the QuantMode enum member.
@pytest.mark.parametrize("kind", ["abs", "sin", "add", "sub", "mul", "min", "max"])
def test_elemwise(kind):
    # Build two fake-quantized float inputs and their true int8 counterparts.
    x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
    x1_scale = np.float32(np.random.rand() + 1)
    x1 = fake_quant(x1, x1_scale)
    x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
    x1_int8 = quant(x1, x1_scale)

    x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
    x2_scale = np.float32(np.random.rand() + 1)
    x2 = fake_quant(x2, x2_scale)
    x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
    x2_int8 = quant(x2, x2_scale)

    output_scale = np.float32(np.random.rand() + 1)
    output_dtype = dtype.qint8(output_scale)

    # The quantized kernel ("qabs", "qadd", ...) should match the float op
    # followed by fake quantization, once its int8 output is dequantized.
    quantized_kind = "q" + kind
    if kind in ("abs", "sin"):
        desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
        actual_out = (
            _elemwise_multi_type(
                x1_int8, mode=quantized_kind, dtype=output_dtype
            ).numpy()
            * output_scale
        )
    else:
        desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
        actual_out = (
            _elemwise_multi_type(
                x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
            ).numpy()
            * output_scale
        )
    np.testing.assert_allclose(actual_out, desired_out.numpy())
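
# `fake_quant` and `quant` are helpers defined elsewhere in the file. Below is
# a minimal sketch of the behaviour the tests rely on -- the names come from
# the tests themselves, but the bodies and qmin/qmax defaults are assumptions:
# `fake_quant` simulates quantization in the float domain and returns a
# float32 tensor, while `quant` actually casts to the quantized dtype.
import megengine.functional as F


def fake_quant(x, scale, qmin=-128, qmax=127):
    # Round to the integer grid, clamp to the representable range, and map
    # back to float; the result is what a qint8 tensor would dequantize to.
    return F.clip(F.round(x / scale), qmin, qmax) * scale


def quant(x, scale):
    # True quantization: cast to qint8 with the given scale.
    return x.astype(dtype.qint8(scale))
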
def test_passive_observer():
    qparams = create_qparams(QuantMode.SYMMERTIC, "qint8", mge.tensor(1.0))
    m = PassiveObserver("qint8")
    m.set_qparams(qparams)
    # PassiveObserver computes no statistics of its own: it should hand back
    # exactly the qparams injected via set_qparams.
    assert m.orig_scale == 1.0
    assert m.scale.numpy() == 1.0
    assert m.get_qparams().dtype_meta == qparams.dtype_meta
    assert m.get_qparams().scale == qparams.scale
    assert m.get_qparams() == qparams
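
# The module tests below assume scaffolding defined earlier in the file:
# shared random scales, fake-quant wrappers for activations/weights/bias, and
# `init_qat_net`, which seeds the QAT observers so they report those scales.
# What follows is a hedged reconstruction under those assumptions, not the
# file's exact definitions.
min_val = np.random.randint(-127, 0, size=(2,)).astype("float32")
max_val = np.random.randint(1, 127, size=(2,)).astype("float32")
weight_scale = np.float32(np.max([-min_val[0], max_val[0]]) / 254 * 2)
act_scale = np.float32(np.max([-min_val[1], max_val[1]]) / 255 * 2)


def gen_inp_scale():
    # A fresh random input scale for each test.
    return np.float32(np.random.rand() + 1)


def fake_quant_act(x, scale):
    # Activations: plain 8-bit fake quantization.
    return fake_quant(x, scale)


def fake_quant_weight(w, scale):
    # Weights: symmetric 8-bit range without -128.
    return fake_quant(mge.tensor(w), scale, qmin=-127, qmax=127)


def fake_quant_bias(b, scale):
    # Bias is carried in 32 bits at scale = inp_scale * weight_scale, which
    # is how quantized conv/linear kernels treat it.
    return fake_quant(mge.tensor(b), scale, qmin=-(2 ** 31), qmax=2 ** 31 - 1)


def init_qat_net(net):
    # Write the min/max statistics into the QAT net's observers so that the
    # scales they derive equal weight_scale / act_scale above (observer
    # attribute names follow MegEngine's QATModule; still an assumption here).
    for m in net.modules():
        if getattr(m, "weight_observer", None) is not None:
            m.weight_observer.min_val[...] = min_val[0]
            m.weight_observer.max_val[...] = max_val[0]
        if getattr(m, "act_observer", None) is not None:
            m.act_observer.min_val[...] = min_val[1]
            m.act_observer.max_val[...] = max_val[1]
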
# The parametrization is assumed; the listed modules cover both branches of
# the parameter-assignment code below.
@pytest.mark.parametrize("module", ["Conv2d", "ConvBn2d", "ConvBnRelu2d"])
def test_conv(module):
    normal_net = getattr(Float, module)(3, 3, 3, 1, 1, 1, bias=True)
    normal_net.eval()
    qat_net = getattr(QAT, module)(3, 3, 3, 1, 1, 1, bias=True)
    qat_net.eval()
    disable_observer(qat_net)

    propagate_qconfig(qat_net, min_max_fakequant_qconfig)
    init_qat_net(qat_net)

    x = mge.tensor(np.random.normal(size=(1, 3, 3, 3)).astype("float32"))
    inp_scale = gen_inp_scale()
    x = fake_quant_act(x, inp_scale)
    x.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", inp_scale))
    x_int8 = quant(x, inp_scale)

    weight = np.random.normal(size=(3, 3, 3, 3)).astype("float32")
    bias = np.random.normal(size=(1, 3, 1, 1)).astype("float32")

    # The float reference gets pre-fake-quantized parameters; the QAT net gets
    # the raw ones and fake-quantizes them itself.
    if module in ("ConvBn2d", "ConvBnRelu2d"):
        normal_net.conv.weight[...] = fake_quant_weight(weight, weight_scale)
        normal_net.conv.bias[...] = fake_quant_bias(bias, inp_scale * weight_scale)
        qat_net.conv.weight[...] = Parameter(weight)
        qat_net.conv.bias[...] = Parameter(bias)
    else:
        normal_net.weight[...] = fake_quant_weight(weight, weight_scale)
        normal_net.bias[...] = fake_quant_bias(bias, inp_scale * weight_scale)
        qat_net.weight[...] = Parameter(weight)
        qat_net.bias[...] = Parameter(bias)

    qat_from_float = getattr(QAT, module).from_float_module(normal_net)
    qat_from_float.eval()
    disable_observer(qat_from_float)
    disable_fake_quant(qat_from_float)

    q_net = getattr(Q, module).from_qat_module(qat_net)
    q_net.eval()

    normal = normal_net(x)
    qat_without_fakequant = qat_from_float(x)
    fake_quant_normal = fake_quant_act(normal_net(x), act_scale)
    qat = qat_net(x)
    q = q_net(x_int8).numpy() * act_scale  # dequantize the int8 output
    np.testing.assert_allclose(qat_without_fakequant, normal, atol=1e-5)
    # Quantized results may be off by up to one quantization step,
    # hence atol=act_scale.
    np.testing.assert_allclose(qat, fake_quant_normal, atol=act_scale)
    np.testing.assert_allclose(q, fake_quant_normal.numpy(), atol=act_scale)
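
# For context: the per-module from_float_module / from_qat_module calls above
# mirror what the high-level conversion helpers do across a whole network.
# A hedged usage sketch (see megengine.quantization.quantize):
#
#     from megengine.quantization import quantize, quantize_qat
#
#     qat_net = quantize_qat(float_net, qconfig=min_max_fakequant_qconfig)
#     # ... QAT training / calibration ...
#     q_net = quantize(qat_net)  # QAT -> Quantized for deployment
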
def test_linear():
    normal_net = Float.Linear(3, 3, bias=True)
    normal_net.eval()
    qat_net = QAT.Linear(3, 3, bias=True)
    qat_net.eval()
    disable_observer(qat_net)

    propagate_qconfig(qat_net, min_max_fakequant_qconfig)
    init_qat_net(qat_net)

    x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
    inp_scale = gen_inp_scale()
    x = fake_quant_act(x, inp_scale)
    x.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", inp_scale))
    x_int8 = quant(x, inp_scale)

    weight = np.random.normal(size=(3, 3)).astype("float32")
    bias = np.random.normal(size=(3,)).astype("float32")
    normal_net.weight[...] = fake_quant_weight(weight, weight_scale)
    normal_net.bias[...] = fake_quant_bias(bias, inp_scale * weight_scale)
    qat_net.weight[...] = Parameter(weight)
    qat_net.bias[...] = Parameter(bias)

    qat_from_float = QAT.Linear.from_float_module(normal_net)
    qat_from_float.eval()
    disable_fake_quant(qat_from_float)
    disable_observer(qat_from_float)

    q_net = Q.Linear.from_qat_module(qat_net)
    q_net.eval()

    normal = normal_net(x)
    qat_without_fakequant = qat_from_float(x)
    fake_quant_normal = fake_quant_act(normal_net(x), act_scale)
    qat = qat_net(x)
    q = q_net(x_int8).numpy() * act_scale
    np.testing.assert_allclose(qat_without_fakequant, normal)
    np.testing.assert_allclose(qat, fake_quant_normal.numpy())
    np.testing.assert_allclose(q, fake_quant_normal.numpy())
def test_concat():
    normal_net = Float.Concat()
    normal_net.eval()
    qat_net = QAT.Concat()
    qat_net.eval()
    disable_observer(qat_net)

    propagate_qconfig(qat_net, min_max_fakequant_qconfig)
    init_qat_net(qat_net)

    inps = []
    inps_int8 = []
    for i in range(3):
        inp_scale = gen_inp_scale()
        inps.append(mge.tensor(np.random.normal(size=(3, 3)).astype("float32")))
        inps[i] = fake_quant_act(inps[i], inp_scale)
        inps[i].qparams.update(
            create_qparams(QuantMode.SYMMERTIC, "qint8", inp_scale)
        )
        inps_int8.append(quant(inps[i], inp_scale))

    qat_from_float = QAT.Concat.from_float_module(normal_net)
    qat_from_float.eval()
    disable_fake_quant(qat_from_float)
    disable_observer(qat_from_float)

    q_net = Q.Concat.from_qat_module(qat_net)
    q_net.eval()

    normal = normal_net(inps)
    qat_without_fakequant = qat_from_float(inps)
    fake_quant_normal = fake_quant_act(normal_net(inps), act_scale)
    qat = qat_net(inps)
    q = q_net(inps_int8).numpy() * act_scale
    np.testing.assert_allclose(qat_without_fakequant, normal)
    np.testing.assert_allclose(qat, fake_quant_normal.numpy())
    np.testing.assert_allclose(q, fake_quant_normal.numpy())