Example #1
        def __init__(self):
            super().__init__()
            self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
            self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
            self.add1 = M.Elemwise("add")
            self.add2 = M.Elemwise("add")
            self.add3 = M.Elemwise("add")

            scale = mge.tensor(16.0 / 128.0)
            self.quant_stub = QuantStub()
            self.quant_stub.act_fake_quant = FakeQuantize(
                _builtin_quant_dtypes["qint8"])
            self.quant_stub.act_fake_quant.set_qparams(
                create_qparams(
                    dtype_meta=_builtin_quant_dtypes["qint8"],
                    scale=scale,
                    zero_point=None,
                ))
            self.quant_stub1 = QuantStub()
            self.quant_stub1.act_fake_quant = FakeQuantize(
                _builtin_quant_dtypes["qint8"])
            self.quant_stub1.act_fake_quant.set_qparams(
                create_qparams(
                    dtype_meta=_builtin_quant_dtypes["qint8"],
                    scale=scale,
                    zero_point=None,
                ))
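A hypothetical forward for this module (the example only shows __init__; this sketch is an assumption, not quoted from the source): each QuantStub fake-quantizes its constant input with the configured scale of 16/128 before the broadcasted adds.

        def forward(self):
            # Hypothetical usage sketch: fake-quantize both constants,
            # then chain the three quantizable adds with broadcasting.
            x = self.quant_stub(mge.tensor(self.data))    # (2, 3, 224, 224)
            y = self.quant_stub1(mge.tensor(self.data1))  # (1, 3, 1, 1), broadcasts
            x = self.add1(x, y)
            x = self.add2(x, y)
            return self.add3(x, y)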
Example #2
    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = self.stride == 1 and inp == oup

        layers = []
        if expand_ratio != 1:
            # pw: 1x1 pointwise expansion
            layers.append(
                M.ConvBnRelu2d(inp, hidden_dim, kernel_size=1, bias=False))
        layers.extend([
            # dw: 3x3 depthwise conv (one group per channel)
            M.ConvBnRelu2d(
                hidden_dim,
                hidden_dim,
                kernel_size=3,
                padding=1,
                stride=stride,
                groups=hidden_dim,
                bias=False,
            ),
            # pw-linear: 1x1 projection without activation
            M.ConvBn2d(hidden_dim, oup, kernel_size=1, bias=False),
        ])
        self.conv = M.Sequential(*layers)
        self.add = M.Elemwise("ADD")
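The matching forward is not shown; a minimal sketch, assuming the standard MobileNetV2 residual pattern, where the skip connection goes through the Elemwise module so that QAT can observe and quantize the add:

    def forward(self, x):
        if self.use_res_connect:
            # Route the residual add through the quantizable Elemwise module.
            return self.add(x, self.conv(x))
        return self.conv(x)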
Example #3
File: resnet.py Project: zzh7982/Models
 def __init__(
     self,
     in_channels,
     channels,
     stride=1,
     groups=1,
     base_width=64,
     dilation=1,
     norm=M.BatchNorm2d,
 ):
     assert norm is M.BatchNorm2d, "Quant mode only support BatchNorm2d currently."
     super(Bottleneck, self).__init__()
     width = int(channels * (base_width / 64.0)) * groups
     self.conv_bn_relu1 = M.ConvBnRelu2d(in_channels, width, 1, 1, bias=False)
     self.conv_bn_relu2 = M.ConvBnRelu2d(
         width,
         width,
         3,
         stride,
         padding=dilation,
         groups=groups,
         dilation=dilation,
         bias=False,
     )
     self.conv_bn3 = M.ConvBn2d(width, channels * self.expansion, 1, 1, bias=False)
     self.downsample = (
         M.Identity()
         if in_channels == channels * self.expansion and stride == 1
         else M.ConvBn2d(
             in_channels, channels * self.expansion, 1, stride, bias=False
         )
     )
     self.add = M.Elemwise("FUSE_ADD_RELU")
Example #4
File: resnet.py Project: zzh7982/Models
 def __init__(
     self,
     in_channels,
     channels,
     stride=1,
     groups=1,
     base_width=64,
     dilation=1,
     norm=M.BatchNorm2d,
 ):
     assert norm is M.BatchNorm2d, "Quant mode only support BatchNorm2d currently."
     super(BasicBlock, self).__init__()
     if groups != 1 or base_width != 64:
         raise ValueError("BasicBlock only supports groups=1 and base_width=64")
     if dilation > 1:
         raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
     self.conv_bn_relu1 = M.ConvBnRelu2d(
         in_channels, channels, 3, stride, padding=dilation, bias=False
     )
     self.conv_bn2 = M.ConvBn2d(channels, channels, 3, 1, padding=1, bias=False)
     self.downsample = (
         M.Identity()
         if in_channels == channels and stride == 1
         else M.ConvBn2d(in_channels, channels, 1, stride, bias=False)
     )
     self.add = M.Elemwise("FUSE_ADD_RELU")
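BasicBlock's forward presumably follows the same pattern sketched under Example #3: the conv_bn2 output and the downsampled identity are combined by the FUSE_ADD_RELU Elemwise, which applies the add and the ReLU as a single quantizable operation.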
Example #5
 def __init__(self):
     super().__init__()
     self.modules = [
         M.Elemwise("ADD"),
         M.Elemwise("ADD"),
         OrderedDict([("a", M.Elemwise("ADD")), ("b", M.Elemwise("ADD"))]),
         M.Elemwise("RELU"),
         M.Elemwise("RELU"),
     ]
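One caveat worth noting: this stores the list under the attribute name modules, which collides with MegEngine's Module.modules() traversal method. The snippet most likely comes from a test of container handling; in production code a distinct attribute name would be safer.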
Example #6
def test_elemwise(kind):
    normal_net = Float.Elemwise(kind)
    normal_net.eval()

    qat_from_float = QAT.Elemwise.from_float_module(normal_net)
    qat_from_float.eval()
    disable_observer(qat_from_float)
    disable_fake_quant(qat_from_float)

    qat_net = QAT.Elemwise(kind)
    qat_net.eval()
    disable_observer(qat_net)

    propagate_qconfig(qat_net, min_max_fakequant_qconfig)
    init_qat_net(qat_net)

    q_net = Q.Elemwise.from_qat_module(qat_net)
    q_net.eval()

    x1_scale = np.float32(np.random.rand() + 1)
    x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
    x1 = fake_quant_act(x1, x1_scale)
    x1.qparams.scale = x1_scale

    x2_scale = np.float32(np.random.rand() + 1)
    x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
    x2 = fake_quant_act(x2, x2_scale)
    x2.qparams.scale = x2_scale

    x1_int8 = quant(x1, x1_scale)
    x2_int8 = quant(x2, x2_scale)

    # test correctness of `Float`, `QAT` and `Quantized`
    if kind in ("add", "mul", "fuse_add_relu"):
        normal = normal_net(x1, x2)
        qat_without_fakequant = qat_from_float(x1, x2)
        fake_quant_normal = fake_quant_act(normal_net(x1, x2), act_scale)
        qat = qat_net(x1, x2)
        q = q_net(x1_int8, x2_int8).numpy() * act_scale
    else:
        normal = normal_net(x1)
        qat_without_fakequant = qat_from_float(x1)
        fake_quant_normal = fake_quant_act(normal_net(x1), act_scale)
        qat = qat_net(x1)
        q = q_net(x1_int8).numpy() * act_scale
    np.testing.assert_allclose(qat_without_fakequant, normal)
    np.testing.assert_allclose(qat, fake_quant_normal)
    np.testing.assert_allclose(q, fake_quant_normal.numpy())
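The helpers quant and fake_quant_act, along with the global act_scale, are defined elsewhere in the test file. A plausible sketch of the two helpers, assuming symmetric per-tensor qint8 quantization (only the names come from the example; the bodies are assumptions):

import megengine.functional as F
from megengine.core.tensor import dtype

def fake_quant_act(x, scale, qmin=-128, qmax=127):
    # Simulate qint8 quantize -> dequantize while staying in float.
    return F.clip(F.round(x / scale), qmin, qmax) * scale

def quant(x, scale):
    # Convert to an actual qint8 tensor carrying the scale (assumed).
    return x.astype(dtype.qint8(scale))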
Example #7
def test_elemwise(kind):
    normal_net = Float.Elemwise(kind)
    normal_net.eval()
    qat_net = QAT.Elemwise(kind)
    qat_net.eval()
    disable_observer(qat_net)

    propagate_qconfig(qat_net, min_max_fakequant_qconfig)
    init_qat_net(qat_net)

    q_net = Q.Elemwise.from_qat_module(qat_net)
    q_net.eval()

    x1_scale = np.float32(np.random.rand() + 1)
    x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
    x1 = fake_quant(x1, x1_scale)
    x1.q_dict["scale"] = x1_scale

    x2_scale = np.float32(np.random.rand() + 1)
    x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
    x2 = fake_quant(x2, x2_scale)
    x2.q_dict["scale"] = x2_scale

    x1_int8 = quant(x1, x1_scale)
    x2_int8 = quant(x2, x2_scale)

    if kind in ("ADD", "MUL", "FUSE_ADD_RELU"):
        normal_out = fake_quant(normal_net(x1, x2), act_scale)
        qat_out = qat_net(x1, x2)
        q_out = q_net(x1_int8, x2_int8).numpy() * act_scale
    else:
        normal_out = fake_quant(normal_net(x1), act_scale)
        qat_out = qat_net(x1)
        q_out = q_net(x1_int8).numpy() * act_scale
    np.testing.assert_allclose(qat_out, normal_out)
    np.testing.assert_allclose(q_out, normal_out.numpy())
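Example #7 is an older-API variant of Example #6: per-tensor quantization metadata lives in x.q_dict["scale"] rather than x.qparams.scale, and the Elemwise kinds are the uppercase strings ("ADD", "MUL", "FUSE_ADD_RELU") used by earlier MegEngine releases.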
Example #8
 def __init__(self):
     super().__init__()
     self.conv0 = MyConvBnRelu2d(3, 3, 3, 1, 1)
     self.conv1 = M.ConvBn2d(3, 3, 1, 1, 0)
     self.conv2 = M.ConvBn2d(3, 3, 1, 1, 0)
     self.add = M.Elemwise("FUSE_ADD_RELU")
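A purely illustrative forward for this topology (assumed; the example defines only the submodules): the stem output feeds two parallel 1x1 branches whose results are fused with add + ReLU.

 def forward(self, x):
     x = self.conv0(x)
     # Fuse the two parallel 1x1 branches with one quantizable add+ReLU.
     return self.add(self.conv1(x), self.conv2(x))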
Example #9
 def __init__(self):
     super().__init__()
     self.a = M.Elemwise("ADD")
     self.b = self.a
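Because self.b is bound to the very object referenced by self.a, the module owns a single Elemwise("ADD") instance exposed under two names; qconfig propagation or any other state change applied through one alias is therefore visible through the other.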