Example #1
    def __init__(self):
        super().__init__()
        self.quant = Float.QuantStub()
        self.linear = Float.Sequential(Float.Linear(3, 3), Float.Linear(3, 3))
        self.dequant = Float.DequantStub()
        # initialize both linear biases with random values
        self.linear[0].bias[...] = Parameter(np.random.rand(3))
        self.linear[1].bias[...] = Parameter(np.random.rand(3))
Example #2
def test_quant_stub():
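    # Consistency check between the Float, QAT and Quantized versions of QuantStub.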
    normal_net = Float.QuantStub()
    normal_net.eval()

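    # QAT module converted from the float one; with observer and fake-quant
    # disabled it should behave exactly like the float module.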
    qat_from_float = QAT.QuantStub.from_float_module(normal_net)
    qat_from_float.eval()
    disable_observer(qat_from_float)
    disable_fake_quant(qat_from_float)

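    # Standalone QAT module: observers are disabled, then a min-max fake-quant
    # qconfig is propagated and the module is initialized by the test helper init_qat_net.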
    qat_net = QAT.QuantStub()
    qat_net.eval()
    disable_observer(qat_net)

    propagate_qconfig(qat_net, min_max_fakequant_qconfig)
    init_qat_net(qat_net)

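    # Quantized module converted from the QAT one; its integer output is
    # rescaled by act_scale before comparison.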
    q_net = Q.QuantStub.from_qat_module(qat_net)
    q_net.eval()

    x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))

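    # fake_quant_act and act_scale come from the surrounding test utilities.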
    normal = normal_net(x)
    qat_without_fakequant = qat_from_float(x)
    fake_quant_normal = fake_quant_act(normal_net(x), act_scale)
    qat = qat_net(x)
    q = q_net(x).numpy() * act_scale
    np.testing.assert_allclose(qat_without_fakequant, normal)
    np.testing.assert_allclose(qat, fake_quant_normal)
    np.testing.assert_allclose(q, fake_quant_normal.numpy())
Example #3
        def __init__(self, name):
            super().__init__(name=name)
            self.quant = M.QuantStub()
            self.linear = M.Linear(3, 3, bias=True)
            self.dequant = M.DequantStub()

            self.linear.weight.name = "user-weight"
            self.linear.bias.name = "user-bias"
Example #4
    def __init__(self):
        super().__init__()
        self.quant = Float.QuantStub()
        self.linear = Float.Linear(3, 3)
        self.dequant = Float.DequantStub()
        # initialize the linear bias with random values
        self.linear.bias.set_value(np.random.rand(3))
Example #5
    def __init__(
        self,
        num_classes=1000,
        width_mult=1.0,
        inverted_residual_setting=None,
        round_nearest=8,
    ):
        """
        MobileNet V2 main class

        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels
            in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer
            to be a multiple of this number
            Set to 1 to turn off rounding
        """
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280

        if inverted_residual_setting is None:
            inverted_residual_setting = [
                # t, c, n, s
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]

        # only check the first element, assuming user knows t,c,n,s are required
        if (len(inverted_residual_setting) == 0
                or len(inverted_residual_setting[0]) != 4):
            raise ValueError("inverted_residual_setting should be non-empty "
                             "or a 4-element list, got {}".format(
                                 inverted_residual_setting))

        # building first layer
        input_channel = _make_divisible(input_channel * width_mult,
                                        round_nearest)
        self.last_channel = _make_divisible(
            last_channel * max(1.0, width_mult), round_nearest)
        features = [
            M.ConvBnRelu2d(3,
                           input_channel,
                           kernel_size=3,
                           padding=1,
                           stride=2,
                           bias=False)
        ]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                stride = s if i == 0 else 1
                features.append(
                    block(input_channel,
                          output_channel,
                          stride,
                          expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        features.append(
            M.ConvBnRelu2d(input_channel,
                           self.last_channel,
                           kernel_size=1,
                           bias=False))
        # make it M.Sequential
        self.features = M.Sequential(*features)

        # building classifier
        self.classifier = M.Sequential(
            M.Dropout(0.2),
            M.Linear(self.last_channel, num_classes),
        )
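        # keep the classifier in float: disable_quantize() excludes it from quantization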
        self.classifier.disable_quantize()

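        # stubs marking where tensors are quantized on the way in and dequantized on the way out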
        self.quant = M.QuantStub()
        self.dequant = M.DequantStub()

        # weight initialization
        for m in self.modules():
            if isinstance(m, M.Conv2d):
                M.init.msra_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    M.init.zeros_(m.bias)
            elif isinstance(m, M.BatchNorm2d):
                M.init.ones_(m.weight)
                M.init.zeros_(m.bias)
            elif isinstance(m, M.Linear):
                M.init.normal_(m.weight, 0, 0.01)
                M.init.zeros_(m.bias)
Example #6
    def __init__(
        self,
        block,
        layers,
        num_classes=1000,
        zero_init_residual=False,
        groups=1,
        width_per_group=64,
        replace_stride_with_dilation=None,
        norm=M.BatchNorm2d,
    ):
        super(ResNet, self).__init__()
        self.in_channels = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError(
                "replace_stride_with_dilation should be None "
                "or a 3-element tuple, got {}".format(replace_stride_with_dilation)
            )
        self.groups = groups
        self.base_width = width_per_group
        self.quant = M.QuantStub()
        self.dequant = M.DequantStub()
        self.conv_bn_relu1 = M.ConvBnRelu2d(
            3, self.in_channels, kernel_size=7, stride=2, padding=3, bias=False
        )
        self.maxpool = M.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], norm=norm)
        self.layer2 = self._make_layer(
            block,
            128,
            layers[1],
            stride=2,
            dilate=replace_stride_with_dilation[0],
            norm=norm,
        )
        self.layer3 = self._make_layer(
            block,
            256,
            layers[2],
            stride=2,
            dilate=replace_stride_with_dilation[1],
            norm=norm,
        )
        self.layer4 = self._make_layer(
            block,
            512,
            layers[3],
            stride=2,
            dilate=replace_stride_with_dilation[2],
            norm=norm,
        )
        self.fc = M.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, M.Conv2d):
                M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
                if m.bias is not None:
                    fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
                    bound = 1 / math.sqrt(fan_in)
                    M.init.uniform_(m.bias, -bound, bound)
            elif isinstance(m, M.BatchNorm2d):
                M.init.ones_(m.weight)
                M.init.zeros_(m.bias)
            elif isinstance(m, M.Linear):
                M.init.msra_uniform_(m.weight, a=math.sqrt(5))
                if m.bias is not None:
                    fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
                    bound = 1 / math.sqrt(fan_in)
                    M.init.uniform_(m.bias, -bound, bound)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros,
        # and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    M.init.zeros_(m.bn3.weight)
                elif isinstance(m, BasicBlock):
                    M.init.zeros_(m.bn2.weight)
Example #7
    def __init__(self, name):
        super().__init__(name=name)
        self.quant = M.QuantStub()
        self.linear = M.Linear(3, 3, bias=True, name="user-linear")
        self.dequant = M.DequantStub()