Example #1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = QConv2d(inplanes, planes, kernel_size=1, bias=False,
                             num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = QConv2d(planes, planes, kernel_size=3, stride=stride,
                             padding=1, bias=False, num_bits=NUM_BITS,
                             num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = QConv2d(planes, planes * 4, kernel_size=1, bias=False,
                             num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
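The snippet above shows only the constructor of the quantized `Bottleneck` block. For context, here is a minimal sketch of the standard ResNet bottleneck `forward` that such a constructor implies, with plain `nn.Conv2d` standing in for the external `QConv2d` so the example is self-contained (assumption: the forward pass follows the usual torchvision pattern):

import torch
import torch.nn as nn

class BottleneckSketch(nn.Module):
    """Standard ResNet bottleneck; nn.Conv2d stands in for QConv2d."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        residual = x
        out = self.relu(self.bn1(self.conv1(x)))    # 1x1 reduce
        out = self.relu(self.bn2(self.conv2(out)))  # 3x3 spatial
        out = self.bn3(self.conv3(out))             # 1x1 expand
        if self.downsample is not None:
            residual = self.downsample(x)           # realign the skip path
        out += residual                             # residual connection
        return self.relu(out)

block = BottleneckSketch(64, 64, downsample=nn.Conv2d(64, 256, kernel_size=1))
print(block(torch.randn(2, 64, 56, 56)).shape)  # torch.Size([2, 256, 56, 56])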
Example #2
    def __init__(self, num_classes=1000,
                 block=Bottleneck, layers=[3, 4, 23, 3]):
        super(ResNet_imagenet, self).__init__()
        self.inplanes = 64
        self.conv1 = QConv2d(3, 64, kernel_size=7, stride=2, padding=3,
                             bias=False, num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = QLinear(512 * block.expansion, num_classes, num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD)

        init_model(self)
        # self.regime = [
        #     {'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-1,
        #      'weight_decay': 1e-4, 'momentum': 0.9},
        #     {'epoch': 30, 'lr': 1e-2},
        #     {'epoch': 60, 'lr': 1e-3, 'weight_decay': 0},
        #     {'epoch': 90, 'lr': 1e-4}
        # ]

        self.regime = [
            {'epoch': 0, 'optimizer': 'RMSProp', 'lr': 1e-1,
             'weight_decay': 1e-4, 'momentum': 0.9},
            {'epoch': 30, 'lr': 1e-2},
            {'epoch': 60, 'lr': 1e-3, 'weight_decay': 0},
            {'epoch': 90, 'lr': 1e-4}
        ]
Example #3
    def __init__(self, num_classes=10,
                 block=BasicBlock, depth=18):
        super(ResNet_cifar10, self).__init__()
        self.inplanes = 16
        n = int((depth - 2) / 6)
        self.conv1 = QConv2d(3, 16, kernel_size=3, stride=1, padding=1,
                             bias=False, num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD, biprecision=BIPRECISION)
        self.bn1 = RangeBN(16, num_bits=NUM_BITS, num_bits_grad=NUM_BITS_GRAD)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = lambda x: x  # identity: 32x32 CIFAR-10 inputs need no early pooling
        self.layer1 = self._make_layer(block, 16, n)
        self.layer2 = self._make_layer(block, 32, n, stride=2)
        self.layer3 = self._make_layer(block, 64, n, stride=2)
        self.layer4 = lambda x: x  # identity: the CIFAR-10 variant uses only three stages
        self.avgpool = nn.AvgPool2d(8)
        self.fc = QLinear(64, num_classes, num_bits=NUM_BITS,
                          num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD, biprecision=BIPRECISION)

        init_model(self)
        self.regime = [
            {'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-1,
             'weight_decay': 1e-4, 'momentum': 0.9},
            {'epoch': 81, 'lr': 1e-2},
            {'epoch': 122, 'lr': 1e-3, 'weight_decay': 0},
            {'epoch': 164, 'lr': 1e-4}
        ]
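The `regime` lists in these examples are training schedules consumed elsewhere in the source repository. As a hedged illustration (the helper below is hypothetical, not taken from the repo), each entry updates the active settings once its `epoch` threshold is reached:

def regime_settings(regime, epoch):
    """Hypothetical interpreter: merge every entry whose 'epoch' threshold
    the current epoch has reached, later entries overriding earlier ones."""
    settings = {}
    for entry in regime:
        if epoch >= entry['epoch']:
            settings.update({k: v for k, v in entry.items() if k != 'epoch'})
    return settings

cifar_regime = [
    {'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-1,
     'weight_decay': 1e-4, 'momentum': 0.9},
    {'epoch': 81, 'lr': 1e-2},
    {'epoch': 122, 'lr': 1e-3, 'weight_decay': 0},
    {'epoch': 164, 'lr': 1e-4},
]
print(regime_settings(cifar_regime, 130))
# {'optimizer': 'SGD', 'lr': 0.001, 'weight_decay': 0, 'momentum': 0.9}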
Example #4
    def __init__(self, num_classes=1000,
                 block=Bottleneck, layers=[3, 4, 23, 3]):
        super(ResNet_imagenet, self).__init__()
        self.inplanes = 64
        self.conv1 = QConv2d(3, 64, kernel_size=7, stride=2, padding=3,
                             bias=False, num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD, biprecision=BIPRECISION)
        self.bn1 = RangeBN(64, num_bits=NUM_BITS, num_bits_grad=NUM_BITS_GRAD)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = QLinear(512 * block.expansion, num_classes, num_bits=NUM_BITS,
                          num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD, biprecision=BIPRECISION)

        init_model(self)
        batch_size = 256.
        # Linear LR scaling: the schedule scales with batch_size / 256 (scale == 1.0 here).
        scale = batch_size / 256.

        def ramp_up_lr(lr0, lrT, T):
            # Linear warm-up from lr0 to lrT over T steps, returned as a string
            # lambda (evaluated later by whatever consumes 'step_lambda').
            rate = (lrT - lr0) / T
            return "lambda t: {'lr': %s + t * %s}" % (lr0, rate)
        self.regime = [
            {'epoch': 0, 'optimizer': 'SGD', 'momentum': 0.9,
                'step_lambda': ramp_up_lr(0, 0.1 * scale, 5004 * 5 / scale)},
            {'epoch': 5,  'lr': scale * 1e-1},
            {'epoch': 30, 'lr': scale * 1e-2},
            {'epoch': 60, 'lr': scale * 1e-3},
            {'epoch': 80, 'lr': scale * 1e-4}
        ]
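`scale` here follows the linear learning-rate scaling heuristic (LR proportional to batch size / 256), and 5004 is roughly the number of batch-256 steps in one ImageNet epoch. Since `ramp_up_lr` returns a string, the training loop presumably `eval`s it; a self-contained check of the warm-up arithmetic under that assumption:

def ramp_up_lr(lr0, lrT, T):
    rate = (lrT - lr0) / T
    return "lambda t: {'lr': %s + t * %s}" % (lr0, rate)

scale = 1.0                          # batch_size / 256 with batch_size = 256
T = 5004 * 5 / scale                 # ~5 epochs of warm-up steps
step_fn = eval(ramp_up_lr(0, 0.1 * scale, T))  # assumption: the consumer eval()s the string
print(step_fn(0))   # {'lr': 0.0}
print(step_fn(T))   # {'lr': 0.1} (up to float rounding): warm-up ends at the base LR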
Example #5
    def __init__(self, num_classes=10,
                 block=BasicBlock, depth=18):
        super(ResNet_cifar10, self).__init__()
        self.inflate = 5
        self.inplanes = 16 * self.inflate
        n = int((depth - 2) / 6)
        self.conv1 = QConv2d(3, 16 * self.inflate, kernel_size=3, stride=1, padding=1,
                             bias=False, num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD)
        self.bn1 = nn.BatchNorm2d(16 * self.inflate)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = lambda x: x
        self.layer1 = self._make_layer(block, 16 * self.inflate, n)
        self.layer2 = self._make_layer(block, 32 * self.inflate, n, stride=2)
        self.layer3 = self._make_layer(block, 64 * self.inflate, n, stride=2)
        self.layer4 = lambda x: x
        self.avgpool = nn.AvgPool2d(8)

        self.bn2 = nn.BatchNorm1d(64 * self.inflate)
        self.bn3 = nn.BatchNorm1d(num_classes)  # one feature per class logit

        self.logsoftmax = nn.LogSoftmax(dim=1)  # explicit dim; the bare nn.LogSoftmax() form is deprecated

        self.fc = QLinear(64 * self.inflate, num_classes, num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT,
                          num_bits_grad=NUM_BITS_GRAD)

        init_model(self)
        self.regime = [
            {'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-1,
             'weight_decay': 1e-4, 'momentum': 0.9},
            {'epoch': 81, 'lr': 1e-2},
            {'epoch': 122, 'lr': 1e-3, 'weight_decay': 0},
            {'epoch': 164, 'lr': 1e-4}
        ]

        # self.regime = [
        #     {'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-3, 'momentum': 0.6},
        #     {'epoch': 81, 'lr': 5e-3},
        #     {'epoch': 101, 'lr': 1e-3,},
        #     {'epoch': 164, 'lr': 1e-4}
        # ]
        # self.regime = [
        #     {'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-2, 'momentum': 0.9},
        #     {'epoch': 41, 'lr': 5e-3},
        #     {'epoch': 81, 'lr': 1e-3,},
        #     {'epoch': 101, 'lr': 1e-4}
        # ]
        # self.regime = [
        #     {'epoch': 0, 'optimizer': 'Adam', 'lr': 1e-3},
        #     {'epoch': 41, 'lr': 5e-4},
        #     {'epoch': 81, 'lr': 1e-3,},
        #     {'epoch': 101, 'lr': 1e-4}
        # ]

        # Dict form keyed by starting epoch; this assignment supersedes the list above.
        self.regime = {
            0: {'optimizer': 'Adam', 'lr': 5e-3},
            101: {'lr': 1e-3},
            142: {'lr': 5e-4},
            184: {'lr': 1e-4},
            220: {'lr': 1e-5}
        }
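The dict form keys settings by starting epoch rather than carrying `'epoch'` inside each entry. A minimal interpreter in the same hypothetical style as the list version sketched earlier:

def regime_settings_dict(regime, epoch):
    """Hypothetical: merge entries for every starting epoch <= current epoch."""
    settings = {}
    for start in sorted(regime):
        if epoch >= start:
            settings.update(regime[start])
    return settings

regime = {0: {'optimizer': 'Adam', 'lr': 5e-3}, 101: {'lr': 1e-3},
          142: {'lr': 5e-4}, 184: {'lr': 1e-4}, 220: {'lr': 1e-5}}
print(regime_settings_dict(regime, 150))  # {'optimizer': 'Adam', 'lr': 0.0005}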
Example #6
def conv3x3(in_planes, out_planes, stride=1):
    "3x3 convolution with padding"
    return QConv2d(in_planes,
                   out_planes,
                   kernel_size=3,
                   stride=stride,
                   padding=1,
                   bias=True,
                   momentum=MOMENTUM,
                   quant_act_forward=ACT_FW,
                   quant_act_backward=ACT_BW,
                   quant_grad_act_error=GRAD_ACT_ERROR,
                   quant_grad_act_gc=GRAD_ACT_GC)
Example #7
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                QConv2d(self.inplanes, planes * block.expansion,
                        kernel_size=1, stride=stride, bias=False,
                        num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)
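`_make_layer` does two jobs: it inserts a strided 1x1 projection on the skip path whenever the first block of a stage changes resolution or width, and it updates `self.inplanes` so later blocks see the widened channel count. A stand-alone sketch of just that bookkeeping (hypothetical helper; the numbers are stage 3 of ResNet-101, layers=[3, 4, 23, 3]):

def make_layer_spec(inplanes, planes, blocks, stride=1, expansion=4):
    """Hypothetical shape-bookkeeping mirror of _make_layer above:
    returns (in_channels, out_channels, stride) per block."""
    specs = []
    for i in range(blocks):
        s = stride if i == 0 else 1         # only the first block strides
        specs.append((inplanes, planes * expansion, s))
        inplanes = planes * expansion       # later blocks see the widened width
    return specs, inplanes

# 23 bottlenecks, first one strided; only that first block needs the
# 1x1 downsample on the skip path.
specs, out_planes = make_layer_spec(512, 256, 23, stride=2)
print(specs[0], specs[1], out_planes)  # (512, 1024, 2) (1024, 1024, 1) 1024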
Example #8
def conv(in_channels,
         out_channels,
         kernel_size,
         stride=1,
         padding=0,
         dilation=1,
         groups=1,
         bias=True):
    return QConv2d(in_channels,
                   out_channels,
                   kernel_size=kernel_size,
                   stride=stride,
                   padding=padding,
                   dilation=dilation,
                   groups=groups,
                   bias=bias,
                   momentum=MOMENTUM,
                   quant_act_forward=ACT_FW,
                   quant_act_backward=ACT_BW,
                   quant_grad_act_error=GRAD_ACT_ERROR,
                   quant_grad_act_gc=GRAD_ACT_GC)
Example #9
def conv3x3(in_planes, out_planes, stride=1):
    "3x3 convolution with padding"
    return QConv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                   padding=1, bias=False, num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD, biprecision=BIPRECISION)
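Examples #6, #8, and #9 are small factory functions that keep the quantization arguments in one place. A plain-PyTorch analogue with `nn.Conv2d` standing in for `QConv2d` (so the snippet runs without the external quantization library), plus a quick shape check:

import torch
import torch.nn as nn

def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding; the examples above would return QConv2d
    configured with the module-level quantization constants instead."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)

x = torch.randn(1, 16, 32, 32)
y = conv3x3(16, 32, stride=2)(x)
print(y.shape)  # torch.Size([1, 32, 16, 16]); padding=1 halves H and W exactly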