Example #1
 def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
     super(BasicBlock, self).__init__()
     self.bn1 = custom.BN_Class(in_planes)
     self.relu1 = nn.ReLU(inplace=True)
     self.conv1 = custom.Con2d_Class(in_planes,
                                     out_planes,
                                     kernel_size=3,
                                     stride=stride,
                                     padding=1,
                                     bias=False)
     self.bn2 = custom.BN_Class(out_planes)
     self.relu2 = nn.ReLU(inplace=True)
     self.conv2 = custom.Con2d_Class(out_planes,
                                     out_planes,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1,
                                     bias=False)
     self.droprate = dropRate
     self.equalInOut = (in_planes == out_planes)
     # 1x1 projection shortcut, only needed when the channel count changes.
     self.convShortcut = (custom.Con2d_Class(in_planes,
                                             out_planes,
                                             kernel_size=1,
                                             stride=stride,
                                             padding=0,
                                             bias=False)
                          if not self.equalInOut else None)
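The forward pass that pairs with this pre-activation block is not shown above; a minimal sketch, assuming torch and torch.nn.functional (as F) are imported and that custom.Con2d_Class / custom.BN_Class behave like nn.Conv2d / nn.BatchNorm2d:

 def forward(self, x):
     # Pre-activation: BN and ReLU run before each convolution.
     if not self.equalInOut:
         x = self.relu1(self.bn1(x))
     else:
         out = self.relu1(self.bn1(x))
     out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
     if self.droprate > 0:
         out = F.dropout(out, p=self.droprate, training=self.training)
     out = self.conv2(out)
     # Add the identity (or 1x1-projected) shortcut to the residual branch.
     return torch.add(x if self.equalInOut else self.convShortcut(x), out)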
Example #2
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = custom.BN_Class(planes)
     self.relu = nn.ReLU(inplace=False)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = custom.BN_Class(planes)
     self.downsample = downsample
     self.stride = stride
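The matching forward pass for this post-activation block follows the standard residual pattern; a sketch under the same assumptions about the custom wrappers:

 def forward(self, x):
     residual = x
     out = self.relu(self.bn1(self.conv1(x)))
     out = self.bn2(self.conv2(out))
     # Project the identity branch when stride or channel count changes.
     if self.downsample is not None:
         residual = self.downsample(x)
     return self.relu(out + residual)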
Example #3
    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = custom.Con2d_Class(3,
                                        64,
                                        kernel_size=7,
                                        stride=2,
                                        padding=3,
                                        bias=False)
        self.bn1 = custom.BN_Class(64)
        self.relu = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.classifier = custom.Linear_Class(512 * block.expansion,
                                              num_classes)

        for m in self.modules():
            if isinstance(m, custom.Con2d_Class):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, custom.BN_Class):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
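The forward pass this constructor implies is the usual stem-stages-head pipeline; a sketch, not part of the original example:

    def forward(self, x):
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))  # 7x7 stem, stride 2
        x = self.layer4(self.layer3(self.layer2(self.layer1(x))))
        x = self.avgpool(x)            # 7x7 global average pool
        x = x.view(x.size(0), -1)      # flatten to (N, 512 * block.expansion)
        return self.classifier(x)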
Example #4
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(Bottleneck, self).__init__()
     self.conv1 = custom.Con2d_Class(inplanes,
                                     planes,
                                     kernel_size=1,
                                     bias=False)
     self.bn1 = custom.BN_Class(planes)
     self.conv2 = custom.Con2d_Class(planes,
                                     planes,
                                     kernel_size=3,
                                     stride=stride,
                                     padding=1,
                                     bias=False)
     self.bn2 = custom.BN_Class(planes)
     self.conv3 = custom.Con2d_Class(planes,
                                     planes * 4,
                                     kernel_size=1,
                                     bias=False)
     self.bn3 = custom.BN_Class(planes * 4)
     self.relu = nn.ReLU(inplace=False)
     self.downsample = downsample
     self.stride = stride
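Note that _make_layer (Examples #3 and #5) reads a class attribute named expansion that these snippets omit; by convention it is 1 for BasicBlock and 4 for Bottleneck, since conv3 widens the output to planes * 4. A sketch of the declarations:

class BasicBlock(nn.Module):
    expansion = 1  # output channels == planes

class Bottleneck(nn.Module):
    expansion = 4  # output channels == 4 * planes (widened by conv3)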
Example #5
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                custom.Con2d_Class(self.inplanes,
                                   planes * block.expansion,
                                   kernel_size=1,
                                   stride=stride,
                                   bias=False),
                custom.BN_Class(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)
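As a usage sketch, the ImageNet constructor from Example #3 drives _make_layer with the standard stage sizes (hypothetical instantiations, not from the source):

model = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=1000)  # ResNet-18
model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=1000)  # ResNet-50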
Example #6
    def __init__(self, depth, num_classes=1000, block_name='BasicBlock'):
        super(ResNet, self).__init__()
        # Model type specifies number of layers for CIFAR-10 model
        if block_name.lower() == 'basicblock':
            assert (
                depth - 2
            ) % 6 == 0, 'When using BasicBlock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
            n = (depth - 2) // 6
            block = BasicBlock
        elif block_name.lower() == 'bottleneck':
            assert (
                depth - 2
            ) % 9 == 0, 'When using Bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            raise ValueError('block_name should be BasicBlock or Bottleneck')

        self.inplanes = 16
        self.conv1 = custom.Con2d_Class(3,
                                        16,
                                        kernel_size=3,
                                        padding=1,
                                        bias=False)
        self.bn1 = custom.BN_Class(16)
        self.relu = nn.ReLU(inplace=False)
        self.layer1 = self._make_layer(block, 16, n)
        self.layer2 = self._make_layer(block, 32, n, stride=2)
        self.layer3 = self._make_layer(block, 64, n, stride=2)
        self.avgpool = nn.AvgPool2d(8)
        self.classifier = custom.Linear_Class(64 * block.expansion,
                                              num_classes)

        for m in self.modules():
            if isinstance(m, custom.Linear_Class) or isinstance(
                    m, custom.Con2d_Class):
                init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, custom.BN_Class):
                m.weight.data.fill_(1)
                if m.bias is not None:
                    m.bias.data.zero_()
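A usage sketch for this CIFAR variant (hypothetical instantiations, consistent with the depth checks above):

model = ResNet(depth=56, num_classes=10)  # (56 - 2) / 6 = 9 BasicBlocks per stage
model = ResNet(depth=47, num_classes=10, block_name='Bottleneck')  # (47 - 2) / 9 = 5 per stage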
Example #7
    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [
            16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor
        ]
        assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
        n = (depth - 4) // 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = custom.Con2d_Class(3,
                                        nChannels[0],
                                        kernel_size=3,
                                        stride=1,
                                        padding=1,
                                        bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1,
                                   dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2,
                                   dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2,
                                   dropRate)
        # global average pooling and classifier
        self.bn1 = custom.BN_Class(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.classifier = custom.Linear_Class(nChannels[3], num_classes)
        self.nChannels = nChannels[3]

        for m in self.modules():
            if isinstance(m, custom.Con2d_Class):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, custom.BN_Class):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, custom.Linear_Class):
                if m.bias is not None:
                    m.bias.data.zero_()
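The NetworkBlock helper referenced above is not among these examples; in standard WideResNet implementations it simply stacks n blocks per stage. A minimal sketch, assuming the BasicBlock from Example #1:

class NetworkBlock(nn.Module):
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        layers = []
        for i in range(int(nb_layers)):
            # Only the first block of a stage changes channels and stride.
            layers.append(block(in_planes if i == 0 else out_planes,
                                out_planes,
                                stride if i == 0 else 1,
                                dropRate))
        self.layer = nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)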