def __init__(self):
    super(Net, self).__init__()
    self.conv1 = BinarizeConv2d(1, 4, kernel_size=3, stride=2, bias=False)
    self.conv2 = BinarizeConv2d(4, 8, kernel_size=3, stride=2, bias=False)
    self.conv3 = BinarizeConv2d(8, 16, kernel_size=3, stride=2, bias=False)
    self.fc1 = BinarizeLinear(16, 8, bias=False)
    self.fc2 = BinarizeLinear(8, 1, bias=False)
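# The BinarizeConv2d / BinarizeLinear layers used throughout this file are
# defined elsewhere in the repo. A minimal sketch of the usual construction,
# assuming the standard straight-through-estimator (STE) binarization from
# the BNN literature (Courbariaux & Hubara et al.); the names BinarizeSTE and
# BinarizeConv2dSketch are illustrative, not the repo's actual implementation:
import torch
import torch.nn as nn
import torch.nn.functional as F


class BinarizeSTE(torch.autograd.Function):
    """sign() on the forward pass; "straight-through" gradient on backward."""

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return x.sign()

    @staticmethod
    def backward(ctx, grad_output):
        (x,) = ctx.saved_tensors
        # Cancel gradients where |x| > 1, as in the BNN paper.
        return grad_output * (x.abs() <= 1).float()


class BinarizeConv2dSketch(nn.Conv2d):
    def forward(self, input):
        # Binarize both activations and weights to {-1, +1}; the real-valued
        # weights are kept as the parameters that receive gradient updates.
        bin_input = BinarizeSTE.apply(input)
        bin_weight = BinarizeSTE.apply(self.weight)
        return F.conv2d(bin_input, bin_weight, self.bias,
                        self.stride, self.padding, self.dilation, self.groups)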
def __init__(self):
    super(Net, self).__init__()
    self.bn0 = nn.BatchNorm1d(784)
    self.bin0 = BinarizeSign()
    self.fc1 = BinarizeLinear(784, 200)
    self.htanh1 = nn.Hardtanh()
    self.bn1 = nn.BatchNorm1d(200)
    self.bin1 = BinarizeSign()
    self.fc2 = BinarizeLinear(200, 100)
    self.htanh2 = nn.Hardtanh()
    self.bn2 = nn.BatchNorm1d(100)
    self.bin2 = BinarizeSign()
    self.fc3 = BinarizeLinear(100, 100)
    self.htanh3 = nn.Hardtanh()
    self.bn3 = nn.BatchNorm1d(100)
    self.bin3 = BinarizeSign()
    self.fc4 = BinarizeLinear(100, 100)
    self.htanh4 = nn.Hardtanh()
    self.bn4 = nn.BatchNorm1d(100)
    self.bin4 = BinarizeSign()
    self.fc5 = BinarizeLinear(100, 10)
    self.logsoftmax = nn.LogSoftmax(dim=1)
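# The __init__ above only registers the layers. A plausible forward pass,
# assuming the usual BNN block ordering implied by the layer names
# (fc -> hardtanh -> batchnorm -> binarize, with the normalized input
# binarized up front); the actual wiring is not shown in this excerpt:
def forward(self, x):
    x = x.view(-1, 784)           # flatten 28x28 MNIST images
    x = self.bin0(self.bn0(x))    # normalize, then binarize the input
    x = self.bin1(self.bn1(self.htanh1(self.fc1(x))))
    x = self.bin2(self.bn2(self.htanh2(self.fc2(x))))
    x = self.bin3(self.bn3(self.htanh3(self.fc3(x))))
    x = self.bin4(self.bn4(self.htanh4(self.fc4(x))))
    return self.logsoftmax(self.fc5(x))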
def __init__(self):
    super(BinarizedCifar10MLP, self).__init__()
    self.fc = nn.Sequential(
        BinarizeLinear(3 * 32 * 32, 2048),
        nn.BatchNorm1d(2048),
        nn.Hardtanh(),
        BinarizeLinear(2048, 2048),
        nn.BatchNorm1d(2048),
        nn.Hardtanh(),
        BinarizeLinear(2048, 2048),
        nn.Dropout(0.5),
        nn.BatchNorm1d(2048),
        nn.Hardtanh(),
        nn.Linear(2048, 10),       # last layer kept full-precision
        nn.LogSoftmax(dim=1))      # bare LogSoftmax() is deprecated; dim must be explicit
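# Quick usage check for the MLP above (illustrative; assumes CIFAR-10 batches
# are flattened before entering self.fc):
import torch

model = BinarizedCifar10MLP()
images = torch.randn(8, 3, 32, 32)          # dummy CIFAR-10 batch
log_probs = model.fc(images.view(8, -1))    # flatten to (8, 3*32*32)
print(log_probs.shape)                      # torch.Size([8, 10])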
def __init__(self, num_classes=10, block=BasicBlock, depth=18):
    super(ResNet_fashionmnist, self).__init__()
    self.inflate = 5
    self.inplanes = 16 * self.inflate
    n = int((depth - 2) / 6)
    self.conv1 = BinarizeConv2d(1, 16 * self.inflate, kernel_size=3,
                                stride=1, padding=1, bias=False)
    self.maxpool = lambda x: x  # identity: no downsampling of the 28x28 input
    self.bn1 = nn.BatchNorm2d(16 * self.inflate)
    # self.tanh1 = nn.Hardtanh(inplace=True)
    self.tanh1 = nn.Hardtanh()
    # self.tanh2 = nn.Hardtanh(inplace=True)
    self.tanh2 = nn.Hardtanh()
    self.layer1 = self._make_layer(block, 16 * self.inflate, n)
    self.layer2 = self._make_layer(block, 32 * self.inflate, n, stride=2)
    self.layer3 = self._make_layer(block, 64 * self.inflate, n, stride=2,
                                   do_bntan=False)
    self.layer4 = lambda x: x  # identity: only three stages are used here
    self.avgpool = nn.AvgPool2d(8)
    self.bn2 = nn.BatchNorm1d(64 * self.inflate)
    self.bn3 = nn.BatchNorm1d(10)
    self.logsoftmax = nn.LogSoftmax(dim=1)
    self.fc = BinarizeLinear(64 * self.inflate, num_classes)
    init_model(self)
    # Earlier SGD schedule, kept for reference:
    # self.regime = {
    #     0: {'optimizer': 'SGD', 'lr': 1e-1,
    #         'weight_decay': 1e-4, 'momentum': 0.9},
    #     81: {'lr': 1e-4},
    #     122: {'lr': 1e-5, 'weight_decay': 0},
    #     164: {'lr': 1e-6}
    # }
    self.regime = {
        0: {'optimizer': 'Adam', 'lr': 1e-3, 'weight_decay': 1e-5},
        101: {'lr': 1e-3},
        142: {'lr': 5e-4},
        184: {'lr': 1e-4},
        220: {'lr': 1e-5}
    }
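# The `regime` dict maps a starting epoch to optimizer settings. A minimal
# sketch of how a training loop might apply it (assumption: the repo's actual
# trainer has its own adjust-optimizer utility; this only illustrates the idea):
def apply_regime(optimizer, regime, epoch):
    """Update optimizer hyperparameters when `epoch` hits a regime milestone."""
    settings = regime.get(epoch)
    if settings is None:
        return
    for group in optimizer.param_groups:
        for key, value in settings.items():
            if key != 'optimizer':  # swapping the optimizer class is not shown
                group[key] = value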
def __init__(self):
    super(BinarizedLeNet5Cifar10, self).__init__()
    self.features = nn.Sequential(
        BinarizeConv2d(3, 32, kernel_size=3, padding=1),   # out: 32 x 32 x 32
        nn.MaxPool2d(2, 2),                                # out: 32 x 16 x 16
        nn.BatchNorm2d(32),
        nn.Hardtanh(),
        BinarizeConv2d(32, 64, kernel_size=3, padding=1),  # out: 64 x 16 x 16
        nn.MaxPool2d(2, 2),                                # out: 64 x 8 x 8
        nn.BatchNorm2d(64),
        nn.Hardtanh())
    self.classifier = nn.Sequential(
        BinarizeLinear(8 * 8 * 64, 512),
        nn.BatchNorm1d(512),
        nn.Hardtanh(),
        BinarizeLinear(512, 256),
        nn.BatchNorm1d(256),
        nn.Hardtanh(),
        nn.Linear(256, 10),
        nn.LogSoftmax(dim=1))
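# Sanity check for the shape comments above (illustrative; assumes the
# binarized layers preserve shapes the way their nn.Conv2d/nn.Linear
# counterparts do):
import torch

model = BinarizedLeNet5Cifar10()
feats = model.features(torch.randn(2, 3, 32, 32))
assert feats.shape == (2, 64, 8, 8)          # matches 8 * 8 * 64 above
out = model.classifier(feats.view(2, -1))
assert out.shape == (2, 10)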
def __init__(self):
    super(BinAlexNet, self).__init__()
    self.features = nn.Sequential(
        BinarizeConv2d(3, 64, kernel_size=5, padding=1, bias=False),    # out: 64 x 30 x 30
        nn.MaxPool2d(kernel_size=2, stride=2),                          # out: 64 x 15 x 15
        nn.BatchNorm2d(64),
        nn.Hardtanh(),
        BinarizeConv2d(64, 192, kernel_size=5, padding=1, bias=False),  # out: 192 x 13 x 13
        nn.MaxPool2d(kernel_size=2, stride=2),                          # out: 192 x 6 x 6
        nn.BatchNorm2d(192),
        nn.Hardtanh(inplace=True),
        BinarizeConv2d(192, 384, kernel_size=3, padding=1, bias=False), # out: 384 x 6 x 6
        nn.BatchNorm2d(384),
        nn.Hardtanh(),
        BinarizeConv2d(384, 256, kernel_size=3, padding=1, bias=False), # out: 256 x 6 x 6
        nn.BatchNorm2d(256),
        nn.Hardtanh(),
        BinarizeConv2d(256, 256, kernel_size=3, padding=1, bias=False), # out: 256 x 6 x 6
        nn.MaxPool2d(kernel_size=3, stride=2),                          # out: 256 x 2 x 2
        nn.BatchNorm2d(256),
        nn.Hardtanh()
    )
    self.classifier = nn.Sequential(
        BinarizeLinear(256 * 2 * 2, 4096, bias=False),
        nn.BatchNorm1d(4096),
        nn.Hardtanh(),
        # nn.Dropout(0.5),
        BinarizeLinear(4096, 2048, bias=False),
        nn.BatchNorm1d(2048),
        nn.Hardtanh(),
        # nn.Dropout(0.5),
        BinarizeLinear(2048, 10)
    )
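# The shape comments follow the standard conv/pool size formula
# out = floor((in + 2*padding - kernel) / stride) + 1. A quick check of the
# feature trunk above for a 3 x 32 x 32 CIFAR-10 input (illustrative);
# the final maxpool yields 2 x 2, which is why the classifier takes
# 256 * 2 * 2 features rather than the 4 x 4 the original comment claimed:
def out_size(size, kernel, stride=1, padding=0):
    return (size + 2 * padding - kernel) // stride + 1

s = 32
s = out_size(s, kernel=5, padding=1)   # conv1      -> 30
s = out_size(s, kernel=2, stride=2)    # maxpool    -> 15
s = out_size(s, kernel=5, padding=1)   # conv2      -> 13
s = out_size(s, kernel=2, stride=2)    # maxpool    -> 6
# (the three kernel=3, padding=1 convs preserve the 6 x 6 size)
s = out_size(s, kernel=3, stride=2)    # final pool -> 2
print(s)                               # 2, hence 256 * 2 * 2 features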
def __init__(self, num_classes=1000, block=Bottleneck, layers=[3, 4, 23, 3]):
    super(ResNet_imagenet, self).__init__()
    self.inplanes = 64
    self.conv1 = BinarizeConv2d(3, 64, kernel_size=7, stride=2, padding=3,
                                bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    # self.tanh = nn.Hardtanh(inplace=True)
    self.tanh = nn.Hardtanh()
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
    self.avgpool = nn.AvgPool2d(7)
    self.fc = BinarizeLinear(512 * block.expansion, num_classes)
    init_model(self)
    self.regime = {
        0: {'optimizer': 'SGD', 'lr': 1e-1,
            'weight_decay': 1e-4, 'momentum': 0.9},
        30: {'lr': 1e-2},
        60: {'lr': 1e-3, 'weight_decay': 0},
        90: {'lr': 1e-4}
    }
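# `_make_layer` is referenced throughout but not shown in this excerpt. A
# minimal sketch in the usual torchvision ResNet style, assuming the binarized
# variant simply swaps BinarizeConv2d into the downsample path (the repo's
# real version also threads a `do_bntan` flag through, as ResNet_fashionmnist
# above uses; that is omitted here):
def _make_layer(self, block, planes, blocks, stride=1):
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion:
        # Project the residual when the shape changes between stages.
        downsample = nn.Sequential(
            BinarizeConv2d(self.inplanes, planes * block.expansion,
                           kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes * block.expansion),
        )
    layers = [block(self.inplanes, planes, stride, downsample)]
    self.inplanes = planes * block.expansion
    for _ in range(1, blocks):
        layers.append(block(self.inplanes, planes))
    return nn.Sequential(*layers)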