Example #1
File: resnet.py | Project: ProQHA/proqha
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, k_bits=8):
        super(OriQuantBasicBlock, self).__init__()

        self.k_bits = k_bits

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        
        ## Layer 1 ##
        self.conv1 = quant_conv3x3(inplanes, planes, kernel_size=3, stride=stride,
                                   bias=False, k_bits=self.k_bits)
        self.bn1 = norm_layer(planes)
        self.relu1 = pact_quantize(self.k_bits)

        ## Layer 2 ##
        self.conv2 = quant_conv3x3(planes, planes, kernel_size=3, stride=1,
                                   bias=False, k_bits=self.k_bits)
        self.bn2 = norm_layer(planes)
        self.relu2 = pact_quantize(self.k_bits)
        
        self.downsample = downsample
        self.stride = stride
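The snippet shows only the constructor. Below is a minimal sketch of the matching forward pass, assuming the standard torchvision BasicBlock data flow; it is inferred from the layer names above, not taken from ProQHA/proqha:

    def forward(self, x):
        # Assumed forward for OriQuantBasicBlock (post-activation ResNet order).
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu1(out)              # PACT-quantized activation

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)  # match shape when stride != 1

        out += identity                    # residual connection
        return self.relu2(out)             # quantize the block output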
Example #2
    def __init__(self, in_planes, planes, stride=1, option='A', k_bits=8):
        super(QuantBasicBlock, self).__init__()
        self.k_bits = k_bits

        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = pact_quantize(self.k_bits)
        self.conv1 = quant_conv3x3(in_planes, planes, kernel_size=3, stride=stride,
                                   padding=1, bias=False, k_bits=self.k_bits)

        self.bn2 = nn.BatchNorm2d(planes)
        self.relu2 = pact_quantize(self.k_bits)
        self.conv2 = quant_conv3x3(planes, planes, kernel_size=3, stride=1,
                                   padding=1, bias=False, k_bits=self.k_bits)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                """
                For CIFAR-10, the ResNet paper uses option A:
                a parameter-free, zero-padded identity shortcut.
                """
                self.shortcut = LambdaLayer(lambda x:
                                            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
            elif option == 'B':
                self.shortcut = nn.Sequential(
                     nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                     nn.BatchNorm2d(self.expansion * planes)
                )
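Here the attributes are declared in bn -> pact_quantize -> conv order, which suggests a pre-activation block. A hedged sketch of the forward this implies (not code from the source project):

    def forward(self, x):
        out = self.conv1(self.relu1(self.bn1(x)))
        out = self.conv2(self.relu2(self.bn2(out)))
        out += self.shortcut(x)  # identity, zero-padded (A), or 1x1 conv (B)
        return out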
Example #3
File: resnet.py | Project: ProQHA/proqha
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, k_bits=8):
        super(QuantBottleneck, self).__init__()
        
        self.k_bits = k_bits
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.bn1 = norm_layer(inplanes)
        self.relu1 = pact_quantize(self.k_bits)
        self.conv1 = quant_conv3x3(inplanes, width, kernel_size=1, stride=1,
                                   padding=0, dilation=1, bias=False,
                                   k_bits=self.k_bits)

        self.bn2 = norm_layer(width)
        self.relu2 = pact_quantize(self.k_bits)
        self.conv2 = quant_conv3x3(width, width, kernel_size=3, stride=stride,
                                   bias=False, k_bits=self.k_bits)

        self.bn3 = norm_layer(width)
        self.relu3 = pact_quantize(self.k_bits)
        self.conv3 = quant_conv3x3(width, planes * self.expansion, kernel_size=1,
                                   padding=0, dilation=1, stride=1, bias=False,
                                   k_bits=self.k_bits)

        self.downsample = downsample
        self.stride = stride
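As in Example #2, the bn -> pact_quantize -> conv ordering points to a pre-activation bottleneck. An assumed forward, inferred from the layers above:

    def forward(self, x):
        identity = x

        out = self.conv1(self.relu1(self.bn1(x)))    # 1x1 reduce
        out = self.conv2(self.relu2(self.bn2(out)))  # 3x3, possibly strided
        out = self.conv3(self.relu3(self.bn3(out)))  # 1x1 expand

        if self.downsample is not None:
            identity = self.downsample(x)
        return out + identity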
Example #4
    def __init__(self, inp, oup, stride, expand_ratio, k_bits=8, **kwargs):
        super(InvertedResidual, self).__init__()
        self.k_bits = k_bits
        self.stride = stride
        assert stride in [1, 2]
        hidden_dim = round(inp * expand_ratio)
        self.use_res_connect = self.stride == 1 and inp == oup

        if expand_ratio == 1:
            self.conv = nn.Sequential(
                # dw
                nn.BatchNorm2d(hidden_dim),
                pact_quantize(self.k_bits),
                quant_conv3x3(hidden_dim,
                              hidden_dim,
                              3,
                              1,
                              stride,
                              1,
                              bias=False,
                              groups=hidden_dim,
                              k_bits=self.k_bits),

                # pw-linear
                nn.BatchNorm2d(hidden_dim),
                pact_quantize(self.k_bits),
                quant_conv3x3(hidden_dim, oup, 1, 0, 1, 1, bias=False,
                              k_bits=self.k_bits))

        else:

            self.conv = nn.Sequential(
                # pw
                nn.BatchNorm2d(inp),
                pact_quantize(self.k_bits),
                quant_conv3x3(inp, hidden_dim, 1, 0, 1, 1, bias=False,
                              k_bits=self.k_bits),

                # dw
                nn.BatchNorm2d(hidden_dim),
                pact_quantize(self.k_bits),
                quant_conv3x3(hidden_dim,
                              hidden_dim,
                              3,
                              1,
                              stride,
                              1,
                              bias=False,
                              groups=hidden_dim,
                              k_bits=self.k_bits),

                # pw-linear
                nn.BatchNorm2d(hidden_dim),
                pact_quantize(self.k_bits),
                quant_conv3x3(hidden_dim, oup, 1, 0, 1, 1, bias=False,
                              k_bits=self.k_bits))
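The use_res_connect flag implies the usual MobileNetV2 forward: add the input back only when the block keeps both stride and width. A sketch:

    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)  # residual only when stride == 1 and inp == oup
        return self.conv(x)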
Example #5
def quant_conv_bn(inp, oup, stride, k_bits=8):
    return nn.Sequential(
        nn.BatchNorm2d(inp), pact_quantize(k_bits=k_bits),
        quant_conv3x3(inp,
                      oup,
                      kernel_size=3,
                      stride=stride,
                      padding=1,
                      bias=False,
                      k_bits=k_bits))
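All of these snippets depend on pact_quantize and quant_conv3x3 from ProQHA/proqha, which the listing never shows. The stand-ins below are illustrative guesses, not the repository's code: a PACT-style activation quantizer (learnable clipping bound, uniform k-bit levels, straight-through estimator) and a plain Conv2d wrapper whose positional order (kernel_size, padding, stride, dilation) is inferred from the calls in Example #4. The real quant_conv3x3 would also quantize its weights.

import torch
import torch.nn as nn

class pact_quantize(nn.Module):
    # Illustrative stand-in for the PACT activation quantizer.
    def __init__(self, k_bits=8):
        super().__init__()
        self.k_bits = k_bits
        self.alpha = nn.Parameter(torch.tensor(10.0))  # learnable clip bound

    def forward(self, x):
        y = torch.clamp(x, min=0.0)
        y = torch.min(y, self.alpha)                   # clip to [0, alpha]
        scale = (2 ** self.k_bits - 1) / self.alpha
        y_q = torch.round(y * scale) / scale           # snap to the k-bit grid
        return y + (y_q - y).detach()                  # straight-through estimator

def quant_conv3x3(inp, oup, kernel_size=3, padding=1, stride=1,
                  dilation=1, groups=1, bias=False, k_bits=8):
    # Illustrative stand-in; ignores k_bits (the real one quantizes weights).
    return nn.Conv2d(inp, oup, kernel_size, stride=stride, padding=padding,
                     dilation=dilation, groups=groups, bias=bias)

stem = quant_conv_bn(32, 64, stride=2, k_bits=4)  # BN -> quantize -> 3x3 conv
out = stem(torch.randn(1, 32, 56, 56))            # shape: (1, 64, 28, 28)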
Example #6
    def __init__(self, inplanes, filters, index, expansion=1, growthRate=12,
                 dropRate=0, k_bits=8):
        super(DenseBasicBlock, self).__init__()
        self.k_bits = k_bits

        self.bn = nn.BatchNorm2d(inplanes)
        self.relu = pact_quantize(self.k_bits)
        self.conv = quant_conv3x3(filters, growthRate, kernel_size=3, stride=1,
                                  padding=1, bias=False, k_bits=self.k_bits)

        self.dropRate = dropRate
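DenseBasicBlock produces growthRate new feature maps and, in DenseNet style, concatenates them onto its input. An assumed forward (F is torch.nn.functional):

    def forward(self, x):
        out = self.conv(self.relu(self.bn(x)))
        if self.dropRate > 0:
            out = F.dropout(out, p=self.dropRate, training=self.training)
        return torch.cat((x, out), dim=1)  # dense connectivity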
Example #7
    def __init__(self, inplanes, outplanes, filters, index, k_bits=8):
        super(Transition, self).__init__()
        self.k_bits = k_bits
        self.bn = nn.BatchNorm2d(inplanes)
        self.relu = pact_quantize(self.k_bits)
        self.conv = quant_conv3x3(filters,
                                  outplanes,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0,
                                  bias=False,
                                  k_bits=self.k_bits)
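A DenseNet transition shrinks the channel count (1x1 conv) and then the spatial size (average pooling). A hedged sketch of the forward this constructor implies:

    def forward(self, x):
        out = self.conv(self.relu(self.bn(x)))
        return F.avg_pool2d(out, 2)  # halve spatial resolution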
Example #8
    def __init__(self, features, num_classes=10, init_weights=True, k_bits=8):
        super(VGG, self).__init__()
        self.k_bits = k_bits
        self.features = features
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            # Original full-precision classifier, kept for reference:
            # nn.Linear(512 * 7 * 7, 4096),
            # nn.ReLU(True),
            # nn.Dropout(),
            # nn.Linear(4096, 4096),
            # nn.ReLU(True),
            # nn.Dropout(),
            # nn.Linear(4096, num_classes),
            pact_quantize(self.k_bits),
            quant_linear(512 * 7 * 7, 4096, bias=True, k_bits=self.k_bits),
            # nn.Dropout(),
            pact_quantize(self.k_bits),
            quant_linear(4096, 4096, bias=True, k_bits=self.k_bits),
            # nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        if init_weights:
            self._initialize_weights()
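The forward is presumably the standard torchvision VGG pipeline; a sketch, assuming that layout:

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)      # -> (N, 512, 7, 7)
        x = torch.flatten(x, 1)  # -> (N, 512 * 7 * 7)
        return self.classifier(x)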
Example #9
def make_layers(cfg, batch_norm=False, k_bits=8):
    layers = []
    in_channels = 1
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            if in_channels == 1:
                # First layer stays full precision on the raw input.
                conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
                relu = nn.ReLU(inplace=True)
            else:
                conv2d = quant_conv3x3(in_channels, v, k_bits=k_bits)
                relu = pact_quantize(k_bits)

            if batch_norm:
                layers += [nn.BatchNorm2d(in_channels), relu, conv2d]
            else:
                layers += [relu, conv2d]
            in_channels = v
    return nn.Sequential(*layers)
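A hypothetical end-to-end usage: since in_channels starts at 1, the network expects single-channel (MNIST-like) input, and only the first convolution stays full precision. The cfg list below is illustrative, in the VGG-11 style:

cfg = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M']
features = make_layers(cfg, batch_norm=True, k_bits=4)
model = VGG(features, num_classes=10, k_bits=4)  # VGG from Example #8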