import torch.nn as nn

# Note: ir_1w32a (providing the binary-weight IRConv2d) and alphaHtanhLayer
# (a learnable Hardtanh-style activation) are project-specific modules assumed
# to be importable from the surrounding repository.


class VGG_SMALL_1W1A_AHTANHLayer(nn.Module):
    """VGG-Small with 1-bit weights/activations and alpha-Hardtanh nonlinearities."""

    def __init__(self, num_classes=10):
        super(VGG_SMALL_1W1A_AHTANHLayer, self).__init__()
        self.num_classes = num_classes
        # The first conv stays full-precision (nn.Conv2d, not IRConv2d),
        # as is standard practice in binary networks.
        self.conv0 = nn.Conv2d(3, 128, kernel_size=3, padding=1, bias=False)
        self.bn0 = nn.BatchNorm2d(128)
        self.nonlinear0 = alphaHtanhLayer()
        self.conv1 = ir_1w32a.IRConv2d(128,
                                       128,
                                       kernel_size=3,
                                       padding=1,
                                       bias=False)
        self.pooling = nn.MaxPool2d(kernel_size=2, stride=2)
        self.bn1 = nn.BatchNorm2d(128)
        self.nonlinear1 = alphaHtanhLayer()
        # Alternative activations kept for reference:
        # self.nonlinear = nn.ReLU(inplace=True)
        # self.nonlinear = nn.Hardtanh(inplace=True)
        self.conv2 = ir_1w32a.IRConv2d(128,
                                       256,
                                       kernel_size=3,
                                       padding=1,
                                       bias=False)
        self.bn2 = nn.BatchNorm2d(256)
        self.nonlinear2 = alphaHtanhLayer()
        self.conv3 = ir_1w32a.IRConv2d(256,
                                       256,
                                       kernel_size=3,
                                       padding=1,
                                       bias=False)
        self.bn3 = nn.BatchNorm2d(256)
        self.nonlinear3 = alphaHtanhLayer()
        self.conv4 = ir_1w32a.IRConv2d(256,
                                       512,
                                       kernel_size=3,
                                       padding=1,
                                       bias=False)
        self.bn4 = nn.BatchNorm2d(512)
        self.nonlinear4 = alphaHtanhLayer()
        self.conv5 = ir_1w32a.IRConv2d(512,
                                       512,
                                       kernel_size=3,
                                       padding=1,
                                       bias=False)
        self.bn5 = nn.BatchNorm2d(512)
        self.nonlinear5 = alphaHtanhLayer()
        # 512 channels at 4x4 spatial size, i.e. a 32x32 CIFAR input after
        # three 2x max-pools (32 / 2 / 2 / 2 = 4).
        self.fc = nn.Linear(512 * 4 * 4, self.num_classes)
        self._initialize_weights()  # defined elsewhere in the class
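The snippet shows only the constructor; forward and _initialize_weights live elsewhere in the class. Assuming the full class is available and the usual VGG-Small layout (self.pooling reused three times, which is what the 512 * 4 * 4 classifier input implies for a 32x32 CIFAR image), a minimal smoke test might look like this; the batch size and input resolution are illustrative assumptions:

import torch

# Hypothetical smoke test; assumes the complete class (including forward) is defined.
model = VGG_SMALL_1W1A_AHTANHLayer(num_classes=10)
x = torch.randn(2, 3, 32, 32)   # CIFAR-10-sized dummy batch
logits = model(x)
print(logits.shape)             # expected: torch.Size([2, 10])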
Example #2
import torch.nn as nn
import torch.nn.functional as F

def conv3x3Binary(in_planes, out_planes, stride=1):
    """3x3 binary-weight convolution with padding."""
    return ir_1w32a.IRConv2d(in_planes,
                             out_planes,
                             kernel_size=3,
                             stride=stride,
                             padding=1,
                             bias=False)
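For illustration, the helper is a drop-in replacement for a plain 3x3 nn.Conv2d; the channel counts below are hypothetical:

# Hypothetical usage: a stride-2 binary conv mapping 64 -> 128 channels.
downsample_conv = conv3x3Binary(64, 128, stride=2)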
class BasicBlock_1w32a(nn.Module):
    """ResNet basic block built from binary-weight (1w/32a) convolutions."""
    expansion = 1  # referenced by the option-'B' shortcut below

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock_1w32a, self).__init__()
        self.conv1 = ir_1w32a.IRConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = ir_1w32a.IRConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                # For CIFAR-10 the ResNet paper uses option A: subsample
                # spatially and zero-pad the extra channels.
                self.shortcut = LambdaLayer(lambda x:
                                            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes // 4, planes // 4), "constant", 0))
            elif option == 'B':
                self.shortcut = nn.Sequential(
                    ir_1w32a.IRConv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(self.expansion * planes)
                )
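LambdaLayer is referenced but not shown. In the CIFAR ResNet code this block follows, it is the small wrapper sketched below; the commented forward is likewise a sketch, assuming the Hardtanh-style activation common in binary networks rather than a confirmed detail of this repository:

class LambdaLayer(nn.Module):
    """Wraps an arbitrary function as an nn.Module (used for the option-'A' shortcut)."""
    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        self.lambd = lambd

    def forward(self, x):
        return self.lambd(x)


# A sketch of BasicBlock_1w32a.forward (the activation choice is an assumption;
# binary ResNets often use hardtanh in place of relu):
#
#     def forward(self, x):
#         out = F.hardtanh(self.bn1(self.conv1(x)))
#         out = self.bn2(self.conv2(out))
#         out += self.shortcut(x)
#         return F.hardtanh(out)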