def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = Quantize(nn.Conv2d)(in_planes,
                                         planes,
                                         kernel_size=1,
                                         bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = Quantize(nn.Conv2d)(planes,
                                         planes,
                                         kernel_size=3,
                                         stride=stride,
                                         padding=1,
                                         bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = Quantize(nn.Conv2d)(planes,
                                         self.expansion * planes,
                                         kernel_size=1,
                                         bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)

        # Identity shortcut by default; switch to a 1x1 projection when shape or stride changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                Quantize(nn.Conv2d)(in_planes,
                                    self.expansion * planes,
                                    kernel_size=1,
                                    stride=stride,
                                    bias=False),
                nn.BatchNorm2d(self.expansion * planes))
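Only the constructor is shown above; the class also relies on an `expansion` class attribute (typically 4 for a Bottleneck) that is not included in the snippet. A minimal sketch of the matching forward pass, assuming the usual residual pattern, could look like this:

import torch.nn.functional as F

def forward(self, x):
    # 1x1 reduce -> 3x3 -> 1x1 expand, each with BN; ReLU after the residual add.
    out = F.relu(self.bn1(self.conv1(x)))
    out = F.relu(self.bn2(self.conv2(out)))
    out = self.bn3(self.conv3(out))
    out += self.shortcut(x)
    return F.relu(out)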
Example #2
    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = Quantize(nn.Conv2d)(3,
                                         64,
                                         kernel_size=7,
                                         stride=2,
                                         padding=3,
                                         bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = Quantize(nn.Linear)(512 * block.expansion, num_classes)

        # He (Kaiming) normal initialization for conv weights; BN starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
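This constructor mirrors the torchvision ResNet layout. As an assumed example of a call site (not shown in the snippet), the usual ResNet-50 layer counts would be passed like this:

# Hypothetical call site; [3, 4, 6, 3] is the standard ResNet-50 recipe.
model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=1000)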
Example #3
 def __init__(self, features, num_classes=1000):
     super(VGG, self).__init__()
     self.features = features
     self.classifier = nn.Sequential(
         Quantize(nn.Linear)(512 * 7 * 7, 4096),
         nn.ReLU(True),
         nn.Dropout(),
         Quantize(nn.Linear)(4096, 4096),
         nn.ReLU(True),
         nn.Dropout(),
         Quantize(nn.Linear)(4096, num_classes),
     )
     self._initialize_weights()
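The classifier above expects a flattened 512 * 7 * 7 feature map, so the forward pass (not included in the snippet) presumably flattens between the two stages; a minimal sketch:

def forward(self, x):
    x = self.features(x)        # convolutional feature extractor
    x = x.view(x.size(0), -1)   # flatten to (N, 512 * 7 * 7)
    return self.classifier(x)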
Example #4
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64

        self.conv1 = Quantize(nn.Conv2d)(3,
                                         64,
                                         kernel_size=3,
                                         stride=1,
                                         padding=1,
                                         bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = Quantize(nn.Linear)(512 * block.expansion, num_classes)
Example #5
def conv3x3(in_planes, out_planes, stride=1):
    "3x3 convolution with padding"
    return Quantize(nn.Conv2d)(in_planes,
                               out_planes,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
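conv3x3 is the usual building block for a ResNet BasicBlock. That block is not shown in the snippet, but a sketch of the standard pattern built on this helper might look like:

import torch.nn as nn

class BasicBlock(nn.Module):
    # Two 3x3 quantized convolutions with an optional projection shortcut.
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample

    def forward(self, x):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        return self.relu(out + identity)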
Example #6
 def __init__(self, num_classes=1000, small_input=False, use_ttq=False):
     super(AlexNet, self).__init__()
     self.feature_output_size = 256 if small_input else 256 * 6 * 6
     if not use_ttq:
         Conv2d = nn.Conv2d
         Linear = nn.Linear
     else:
         Conv2d = Quantize(nn.Conv2d)
         Linear = Quantize(nn.Linear)
     feature_layers = [
         Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         Conv2d(64, 192, kernel_size=5, padding=2),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         Conv2d(192, 384, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         Conv2d(384, 256, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         Conv2d(256, 256, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
     ]
     if not small_input:
         feature_layers.append(nn.MaxPool2d(kernel_size=3, stride=2))
     self.features = nn.Sequential(*feature_layers)
     fc_size = min(self.feature_output_size, 4096)
     self.classifier = nn.Sequential(
         nn.Dropout(0.5),
         Linear(self.feature_output_size, fc_size),
         nn.ReLU(inplace=True),
         nn.Dropout(0.5),
         Linear(fc_size, fc_size),
         nn.ReLU(inplace=True),
         Linear(fc_size, num_classes),
     )
     # Kaiming initialization for conv/linear weights; TTQ scale factors W_p / W_n
     # are re-initialized to small positive / negative ranges (in-place init_ variants).
     for m in self.modules():
         if isinstance(m, (nn.Conv2d, nn.Linear)):
             init.kaiming_uniform_(m.weight.data)
             if m.bias is not None:
                 m.bias.data.zero_()
         if isinstance(m, TTQ):
             init.uniform_(m.W_p.data, 0.05, 0.1)
             init.uniform_(m.W_n.data, -0.1, -0.05)
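The implementation of Quantize and TTQ is not part of these snippets. Purely as an illustration of how a trained-ternary-quantization (TTQ) style wrapper could be structured (names, defaults, and behaviour below are assumptions, and the straight-through gradient handling of the real method is omitted), one possible sketch is:

import torch
import torch.nn as nn
import torch.nn.functional as F

class TTQ(object):
    # Hypothetical mixin/marker; would satisfy the isinstance(m, TTQ) check above.
    def ternarize(self, threshold=0.05):
        # Map the latent full-precision weight to the ternary values {W_n, 0, W_p}.
        delta = threshold * self.weight.abs().max()
        pos = (self.weight > delta).float()
        neg = (self.weight < -delta).float()
        return self.W_p * pos + self.W_n * neg

def Quantize(base):
    # Hypothetical factory: returns a TTQ-quantized variant of nn.Conv2d or nn.Linear.
    class Quantized(TTQ, base):
        def __init__(self, *args, **kwargs):
            base.__init__(self, *args, **kwargs)
            self.W_p = nn.Parameter(torch.tensor(0.1))    # learned positive scale
            self.W_n = nn.Parameter(torch.tensor(-0.1))   # learned negative scale

        def forward(self, x):
            w = self.ternarize()
            if isinstance(self, nn.Conv2d):
                return F.conv2d(x, w, self.bias, self.stride,
                                self.padding, self.dilation, self.groups)
            return F.linear(x, w, self.bias)

    return Quantized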
Example #7
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(Bottleneck, self).__init__()
     self.conv1 = Quantize(nn.Conv2d)(inplanes,
                                      planes,
                                      kernel_size=1,
                                      bias=False)
     self.bn1 = nn.BatchNorm2d(planes)
     self.conv2 = Quantize(nn.Conv2d)(planes,
                                      planes,
                                      kernel_size=3,
                                      stride=stride,
                                      padding=1,
                                      bias=False)
     self.bn2 = nn.BatchNorm2d(planes)
     self.conv3 = Quantize(nn.Conv2d)(planes,
                                      planes * 4,
                                      kernel_size=1,
                                      bias=False)
     self.bn3 = nn.BatchNorm2d(planes * 4)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
Example #8
def make_layers(cfg, batch_norm=False):
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = Quantize(nn.Conv2d)(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)
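A typical call site for make_layers passes one of the standard VGG configurations; as an assumed example (the cfg table itself is not part of the snippet), the well-known VGG-16 layout would be:

# Standard VGG-16 channel configuration; 'M' marks a 2x2 max-pool stage.
cfg_vgg16 = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
             512, 512, 512, 'M', 512, 512, 512, 'M']
model = VGG(make_layers(cfg_vgg16, batch_norm=True), num_classes=1000)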
Example #9
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                Quantize(nn.Conv2d)(self.inplanes,
                                    planes * block.expansion,
                                    kernel_size=1,
                                    stride=stride,
                                    bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)
Example #10
 def __init__(self):
     super().__init__()
     self.fc1 = Quantize(nn.Linear)(784, 512)
     self.fc2 = Quantize(nn.Linear)(512, 512)
     self.fc3 = Quantize(nn.Linear)(512, 512)
     self.fc4 = Quantize(nn.Linear)(512, 10)
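The layer sizes (784 in, 10 out) suggest an MNIST-style MLP over flattened 28x28 inputs; a minimal forward sketch, assuming plain ReLU activations:

import torch.nn.functional as F

def forward(self, x):
    x = x.view(x.size(0), 784)   # flatten 28x28 images
    x = F.relu(self.fc1(x))
    x = F.relu(self.fc2(x))
    x = F.relu(self.fc3(x))
    return self.fc4(x)           # class logits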
Example #11
 def __init__(self):
     super().__init__()
     self.conv1 = Quantize(nn.Conv2d)(1, 10, kernel_size=5)
     self.conv2 = Quantize(nn.Conv2d)(10, 20, kernel_size=5)
     self.fc1 = Quantize(nn.Linear)(320, 128)
     self.fc2 = Quantize(nn.Linear)(128, 10)
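The 320-unit input of fc1 matches 20 channels of 4x4 features, i.e. the classic PyTorch MNIST layout (each 5x5 convolution followed by 2x2 max pooling). A forward sketch under that assumption:

import torch.nn.functional as F

def forward(self, x):
    x = F.relu(F.max_pool2d(self.conv1(x), 2))   # 1x28x28 -> 10x12x12
    x = F.relu(F.max_pool2d(self.conv2(x), 2))   # 10x12x12 -> 20x4x4
    x = x.view(x.size(0), 320)                   # 20 * 4 * 4 = 320, matching fc1
    x = F.relu(self.fc1(x))
    return self.fc2(x)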