Example #1
    def __init__(self, block, layers, num_classes=1000, nbits_w=4, nbits_a=4, q_mode=Qmodes.kernel_wise):
        self.inplanes = 64
        super(ResNetQFN, self).__init__()
        self.nbits_w = nbits_w
        self.nbits_a = nbits_a
        self.q_mode = q_mode
        self.conv1 = nn.Sequential(
            ActQ(nbits=-1 if max(nbits_a, nbits_w) <= 0 else 8, signed=True),
            Conv2dQ(3, 64, kernel_size=7, stride=2, padding=3, bias=False, nbits=nbits_w, mode=q_mode),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True))  # trailing ActQ dropped here, as in LQ-Net
        self.maxpool = nn.Sequential(nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
                                     ActQ(nbits=nbits_a))
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.Sequential(nn.AvgPool2d(7, stride=1),
                                     ActQ(nbits=nbits_a))  # LQ-Net would drop this ActQ
        self.fc = LinearQ(512 * block.expansion, num_classes, nbits=nbits_w)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
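
A hypothetical usage sketch: building a 4-bit ResNet-18-style model with this constructor. BasicBlockQ and the [2, 2, 2, 2] layer counts follow the torchvision ResNet-18 convention and are assumptions here, not taken from this repository.

    # Hypothetical usage, assuming BasicBlockQ (Example #3) as the block type.
    model = ResNetQFN(BasicBlockQ, [2, 2, 2, 2], num_classes=1000,
                      nbits_w=4, nbits_a=4, q_mode=Qmodes.kernel_wise)
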
Example #2
    def _make_layers(self, cfg, nbits_w, nbits_a, q_mode):
        layers = []
        in_channels = 3

        for i, x in enumerate(cfg):
            if x == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            elif in_channels == 3:  # do not quantize first layer
                layers += [
                    nn.Conv2d(in_channels,
                              x,
                              kernel_size=3,
                              padding=1,
                              bias=False),
                    nn.BatchNorm2d(x),
                    nn.ReLU(inplace=True),
                    ActQ(nbits=nbits_a, l2=self.l2)
                ]
                in_channels = x
            else:
                layers += [
                    Conv2dQ(in_channels,
                            x,
                            kernel_size=3,
                            padding=1,
                            bias=False,
                            nbits=nbits_w,
                            mode=q_mode,
                            l2=self.l2),
                    nn.BatchNorm2d(x),
                    nn.ReLU(inplace=True),
                    ActQ(nbits=nbits_a, l2=self.l2)
                ]
                in_channels = x
        return nn.Sequential(*layers)
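
The cfg argument uses the familiar torchvision VGG encoding: integers give output channel counts for 3x3 convolutions, and 'M' marks a 2x2 max-pooling stage. A minimal sketch of such a dictionary (this particular cfg is an assumption, not the repository's actual table):

    # Assumed VGG-11-style cfg; 'M' maps to nn.MaxPool2d(kernel_size=2, stride=2).
    cfg = {
        'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    }
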
Example #3
 def __init__(self, inplanes, planes, stride=1, downsample=None, nbits_w=4, nbits_a=4, q_mode=Qmodes.kernel_wise,
              l2=True):
     super(BasicBlockQ, self).__init__()
     self.conv1 = nn.Sequential(convq3x3(inplanes, planes, stride, nbits_w=nbits_w, q_mode=q_mode, l2=l2),
                                nn.BatchNorm2d(planes),
                                nn.ReLU(inplace=True),
                                ActQ(nbits=nbits_a, l2=l2))
     self.conv2 = nn.Sequential(convq3x3(planes, planes, nbits_w=nbits_w, q_mode=q_mode, l2=l2),
                                nn.BatchNorm2d(planes),
                                ActQ(nbits=nbits_a, l2=l2))
     self.downsample = downsample
     self.stride = stride
     self.relu = nn.ReLU(inplace=True)
     self.out_actq = ActQ(nbits=nbits_a, l2=l2)
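
Only the constructor is shown above; a forward pass consistent with these fields, assuming the standard ResNet residual pattern, would look roughly like this sketch:

    def forward(self, x):
        # Sketch only; the repository's actual forward is not shown here.
        residual = x if self.downsample is None else self.downsample(x)
        out = self.conv2(self.conv1(x))
        out = self.relu(out + residual)
        return self.out_actq(out)  # re-quantize the block output
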
Example #4
 def __init__(self,
              vgg_name,
              nbits_w=4,
              nbits_a=4,
              q_mode=Qmodes.kernel_wise,
              l2=True):
     super(VGGQFI, self).__init__()
     self.l2 = l2
     self.features = self._make_layers(cfg[vgg_name],
                                       nbits_w=nbits_w,
                                       nbits_a=nbits_a,
                                       q_mode=q_mode)
     self.last_features = nn.Sequential(
         Conv2dQ(512,
                 512,
                 kernel_size=3,
                 padding=1,
                 bias=False,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=self.l2), nn.BatchNorm2d(512), nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=2, stride=2))
     scale = 1
     if vgg_name == 'VGG7Q':
         scale = 16
     self.expand_classifier = nn.Sequential(
         ActQ(nbits=-1 if max(nbits_a, nbits_w) <= 0 else 8,
              expand=True,
              l2=self.l2),
         LinearQ(512 * scale * 2, 10, nbits=nbits_w, l2=self.l2),
     )
Example #5
 def __init__(self, features, num_classes=1000, init_weights=True):
     super(VGGQ, self).__init__()
     self.features = features
     self.classifier = nn.Sequential(
         LinearQ(512 * 7 * 7, 4096),
         nn.ReLU(True),
         ActQ(),
         # nn.Dropout(),
         LinearQ(4096, 4096),
         nn.ReLU(True),
         ActQ(),
         # nn.Dropout(),
         nn.Linear(4096, num_classes),
     )
     if init_weights:
         self._initialize_weights()
Example #6
 def _make_layers(self, cfg, nbits_w, nbits_a, q_mode):
     layers = []
     in_channels = 3
      # changed to ActQ + Conv2dQ by Joey.Z on May 28, 2019
     for i, x in enumerate(cfg):
         if x == 'M':
             layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
         elif in_channels == 3:  # first layer
             layers += [
                 ActQ(nbits=-1 if max(nbits_a, nbits_w) <= 0 else 8,
                      signed=True,
                      l2=self.l2),
                 Conv2dQ(in_channels,
                         x,
                         kernel_size=3,
                         padding=1,
                         bias=False,
                         nbits=nbits_w,
                         mode=q_mode,
                         l2=self.l2),
                 nn.BatchNorm2d(x),
                 nn.ReLU(inplace=True),
             ]
             in_channels = x
         elif i == 7:  # last layer
             layers += [
                 PACT(nbits=nbits_a),
                 Conv2dQ(in_channels,
                         x,
                         kernel_size=3,
                         padding=1,
                         bias=False,
                         nbits=nbits_w,
                         mode=q_mode,
                         l2=self.l2),
                 nn.BatchNorm2d(x),
                 nn.ReLU(inplace=True),
             ]
         else:
             layers += [
                 PACT(nbits=nbits_a),
                 Conv2dQ(in_channels,
                         x,
                         kernel_size=3,
                         padding=1,
                         bias=False,
                         nbits=nbits_w,
                         mode=q_mode,
                         l2=self.l2),
                 nn.BatchNorm2d(x),
                 nn.ReLU(inplace=True),
             ]
             in_channels = x
     return nn.Sequential(*layers)
Example #7
def make_layers(cfg, batch_norm=False):
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            if in_channels == 3:
                conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            else:
                conv2d = Conv2dQ(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [
                    conv2d,
                    nn.BatchNorm2d(v),
                    nn.ReLU(inplace=True),
                    ActQ()
                ]
            else:
                layers += [conv2d, nn.ReLU(inplace=True), ActQ()]
            in_channels = v
    return nn.Sequential(*layers)
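
Combined with the VGGQ constructor from Example #5, a hypothetical assembly might look as follows; the cfg list is the standard VGG-16 'D' configuration, assumed here for illustration:

    # Assumed VGG-16 ('D') configuration, not necessarily this repo's cfg.
    cfg_d = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
             512, 512, 512, 'M', 512, 512, 512, 'M']
    model = VGGQ(make_layers(cfg_d, batch_norm=True), num_classes=1000)
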
Example #8
 def __init__(self, inplanes, planes, stride=1, downsample=None, nbits_w=4, nbits_a=4, q_mode=Qmodes.kernel_wise,
              l2=True):
     super(BottleneckQ, self).__init__()
     self.conv1 = nn.Sequential(
         Conv2dQ(inplanes, planes, kernel_size=1, bias=False, nbits=nbits_w, mode=q_mode, l2=l2),
         nn.BatchNorm2d(planes),
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a, l2=l2))
     self.conv2 = nn.Sequential(
         Conv2dQ(planes, planes, kernel_size=3, stride=stride,
                 padding=1, bias=False, nbits=nbits_w, mode=q_mode, l2=l2),
         nn.BatchNorm2d(planes),
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a, l2=l2))
     self.conv3 = nn.Sequential(
         Conv2dQ(planes, planes * 4, kernel_size=1, bias=False, nbits=nbits_w, mode=q_mode, l2=l2),
         nn.BatchNorm2d(planes * 4),
         ActQ(nbits=nbits_a, signed=True, l2=l2))
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.out_actq = ActQ(nbits=nbits_a, l2=l2)
     self.stride = stride
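
Note that conv3 already ends in a signed ActQ, so the residual addition operates on quantized signed tensors (the downsample path in Example #10 is quantized the same way). A forward sketch assuming the usual bottleneck wiring:

    def forward(self, x):
        # Sketch only, assuming the standard ResNet bottleneck pattern.
        residual = x if self.downsample is None else self.downsample(x)
        out = self.conv3(self.conv2(self.conv1(x)))
        out = self.relu(out + residual)
        return self.out_actq(out)
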
Example #9
 def _make_layers(self, cfg, nbits_w, nbits_a, q_mode):
     layers = []
     in_channels = 3
     first_bits = -1 if max(nbits_a, nbits_w) <= 0 else 8
     layers += [ActQ(nbits=first_bits, signed=True)]
     for x in cfg:
         if x == 'M':
             layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
         else:
             layers += [
                 Conv2dQ(in_channels,
                         x,
                         kernel_size=3,
                         padding=1,
                         bias=False,
                         nbits=nbits_w,
                         mode=q_mode,
                         l2=self.l2),
                 nn.BatchNorm2d(x),
                 nn.ReLU(inplace=True),
                 ActQ(nbits=nbits_a, l2=self.l2)
             ]
             in_channels = x
     return nn.Sequential(*layers)
Example #10
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                Conv2dQ(self.inplanes, planes * block.expansion,
                        kernel_size=1, stride=stride, bias=False,
                        nbits=self.nbits_w, mode=self.q_mode),
                nn.BatchNorm2d(planes * block.expansion),
                ActQ(nbits=self.nbits_a, signed=True),  # differs from the pre-trained model
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample,
                            nbits_w=self.nbits_w, nbits_a=self.nbits_a, q_mode=self.q_mode))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, nbits_w=self.nbits_w,
                                nbits_a=self.nbits_a, q_mode=self.q_mode))

        return nn.Sequential(*layers)
Example #11
    def __init__(self,
                 inp,
                 oup,
                 stride,
                 expand_ratio,
                 nbits_w=4,
                 nbits_a=4,
                 q_mode=Qmodes.kernel_wise):
        super(InvertedResidualQ, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        hidden_dim = round(inp * expand_ratio)
        self.use_res_connect = self.stride == 1 and inp == oup
        self.out_actq = ActQ(nbits_a, signed=True)
        if expand_ratio == 1:
            self.conv = nn.Sequential(
                # dw
                Conv2dQ(hidden_dim,
                        hidden_dim,
                        3,
                        stride,
                        1,
                        groups=hidden_dim,
                        bias=False,
                        nbits=nbits_w,
                        mode=q_mode),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                ActQ(nbits=nbits_a),
                # pw-linear
                Conv2dQ(hidden_dim,
                        oup,
                        1,
                        1,
                        0,
                        bias=False,
                        nbits=nbits_w,
                        mode=q_mode),
                nn.BatchNorm2d(oup),
                ActQ(nbits=nbits_a, signed=True),
            )
        else:
            self.conv = nn.Sequential(
                # pw
                Conv2dQ(inp,
                        hidden_dim,
                        1,
                        1,
                        0,
                        bias=False,
                        nbits=nbits_w,
                        mode=q_mode),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                ActQ(nbits=nbits_a),
                # dw
                Conv2dQ(hidden_dim,
                        hidden_dim,
                        3,
                        stride,
                        1,
                        groups=hidden_dim,
                        bias=False,
                        nbits=nbits_w,
                        mode=q_mode),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                ActQ(nbits=nbits_a),
                # pw-linear
                Conv2dQ(hidden_dim,
                        oup,
                        1,
                        1,
                        0,
                        bias=False,
                        nbits=nbits_w,
                        mode=q_mode),
                nn.BatchNorm2d(oup),
                ActQ(nbits=nbits_a, signed=True),
            )
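
A forward pass consistent with use_res_connect and out_actq, assuming the standard MobileNetV2 inverted-residual pattern:

    def forward(self, x):
        # Sketch only: quantize the sum when the shortcut is used.
        if self.use_res_connect:
            return self.out_actq(x + self.conv(x))
        return self.conv(x)
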
Example #12
 def __init__(self,
              num_classes=1000,
              nbits_w=4,
              nbits_a=4,
              q_mode=Qmodes.kernel_wise):
     super(AlexNetQFI, self).__init__()
     self.nbits_w = nbits_w
     self.nbits_a = nbits_a
     self.q_mode = q_mode
     self.features = nn.Sequential(
         ActQ(nbits=-1 if max(nbits_a, nbits_w) <= 0 else 8, signed=True),
         Conv2dQ(3,
                 64,
                 kernel_size=11,
                 stride=4,
                 padding=2,
                 nbits=nbits_w,
                 mode=q_mode),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         ActQ(nbits=nbits_a),
         Conv2dQ(64,
                 192,
                 kernel_size=5,
                 padding=2,
                 nbits=nbits_w,
                 mode=q_mode),  # conv2
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         ActQ(nbits=nbits_a),
         Conv2dQ(192,
                 384,
                 kernel_size=3,
                 padding=1,
                 nbits=nbits_w,
                 mode=q_mode),  # conv3
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a),
         Conv2dQ(384,
                 256,
                 kernel_size=3,
                 padding=1,
                 nbits=nbits_w,
                 mode=q_mode),  # conv4
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a),
         Conv2dQ(256,
                 256,
                 kernel_size=3,
                 padding=1,
                 nbits=nbits_w,
                 mode=q_mode),  # conv5
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         ActQ(nbits=nbits_a),
     )
     self.classifier = nn.Sequential(
         # nn.Dropout(),
         LinearQ(256 * 6 * 6, 4096, nbits=nbits_w),  # fc6
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a),
         # nn.Dropout(),
         LinearQ(4096, 4096, nbits=nbits_w),  # fc7
         nn.ReLU(inplace=True),
         ActQ(nbits=-1 if max(nbits_a, nbits_w) <= 0 else 8, expand=True),
     )
     # self.shared_fc = LinearQ(4096, num_classes, nbits=nbits_w)
     # self.last_add = EltwiseAdd(inplace=True)
     self.expand_fc = LinearQ(4096 * 2, num_classes, nbits=nbits_w)  # fc8
Example #13
 def __init__(self,
              num_classes=1000,
              nbits_w=4,
              nbits_a=4,
              q_mode=Qmodes.kernel_wise,
              l2=True):
     super(AlexNetQFN, self).__init__()
     self.features = nn.Sequential(
         ActQ(nbits=-1 if max(nbits_a, nbits_w) <= 0 else 8,
              signed=True,
              l2=l2),
         Conv2dQ(3,
                 64,
                 kernel_size=11,
                 stride=4,
                 padding=2,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=l2),  # conv1
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         ActQ(nbits=nbits_a, l2=l2),
         Conv2dQ(64,
                 192,
                 kernel_size=5,
                 padding=2,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=l2),  # conv2
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         ActQ(nbits=nbits_a, l2=l2),
         Conv2dQ(192,
                 384,
                 kernel_size=3,
                 padding=1,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=l2),  # conv3
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a, l2=l2),
         Conv2dQ(384,
                 256,
                 kernel_size=3,
                 padding=1,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=l2),  # conv4
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a, l2=l2),
         Conv2dQ(256,
                 256,
                 kernel_size=3,
                 padding=1,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=l2),  # conv5
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         ActQ(nbits=nbits_a, l2=l2),
     )
     self.classifier = nn.Sequential(
         # nn.Dropout(),
         LinearQ(256 * 6 * 6,
                 4096,
                 nbits=nbits_w,
                 mode=Qmodes.layer_wise,
                 l2=l2),  # fc6
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a, l2=l2),
         # nn.Dropout(),
         LinearQ(4096, 4096, nbits=nbits_w, mode=Qmodes.layer_wise,
                 l2=l2),  # fc7
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a, l2=l2),  # key layer
         LinearQ(4096,
                 num_classes,
                 nbits=nbits_w,
                 mode=Qmodes.layer_wise,
                 l2=l2),  # fc8
     )
Example #14
 def __init__(self,
              num_classes=1000,
              nbits_w=4,
              nbits_a=4,
              q_mode=Qmodes.kernel_wise,
              l2=True):
     super(AlexNetQ, self).__init__()
     self.features = nn.Sequential(
         nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),  # conv1
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         ActQ(nbits=nbits_a, l2=l2),
         Conv2dQ(64,
                 192,
                 kernel_size=5,
                 padding=2,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=l2),  # conv2
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         ActQ(nbits=nbits_a, l2=l2),
         Conv2dQ(192,
                 384,
                 kernel_size=3,
                 padding=1,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=l2),  # conv3
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a, l2=l2),
         Conv2dQ(384,
                 256,
                 kernel_size=3,
                 padding=1,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=l2),  # conv4
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a, l2=l2),
         Conv2dQ(256,
                 256,
                 kernel_size=3,
                 padding=1,
                 nbits=nbits_w,
                 mode=q_mode,
                 l2=l2),  # conv5
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         ActQ(nbits=nbits_a, l2=l2),
     )
     self.classifier = nn.Sequential(
         nn.Dropout(),
          # Experiments showed no difference between layer-wise and kernel-wise quantization here.
         LinearQ(256 * 6 * 6,
                 4096,
                 nbits=nbits_w,
                 mode=Qmodes.layer_wise,
                 l2=l2),  # fc6
         nn.ReLU(inplace=True),
         ActQ(nbits=nbits_a, l2=l2),
         nn.Dropout(),
          LinearQ(4096, 4096, nbits=nbits_w, mode=Qmodes.layer_wise,
                 l2=l2),  # fc7
         nn.ReLU(inplace=True),
         nn.Linear(4096, num_classes),  # fc8
     )
Example #15
 def __init__(self, dim=0, nbits=4, signed=False, l2=True):
     super(ConcatQ, self).__init__()
     self.dim = dim
     self.actq = ActQ(nbits=nbits, signed=signed, l2=l2)
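
A minimal forward sketch, assuming ConcatQ concatenates its inputs along self.dim and then quantizes the result with the shared ActQ:

    def forward(self, *inputs):
        # Sketch only; assumes torch is imported in the defining module.
        return self.actq(torch.cat(inputs, dim=self.dim))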