Example #1
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = custom.BN_Class(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = custom.Con2d_Class(in_planes,
                                        out_planes,
                                        kernel_size=3,
                                        stride=stride,
                                        padding=1,
                                        bias=False)
        self.bn2 = custom.BN_Class(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = custom.Con2d_Class(out_planes,
                                        out_planes,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1,
                                        bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # 1x1 projection shortcut only when the channel counts differ
        self.convShortcut = custom.Con2d_Class(
            in_planes,
            out_planes,
            kernel_size=1,
            stride=stride,
            padding=0,
            bias=False) if not self.equalInOut else None
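A minimal sketch of the forward pass this pre-activation block is usually paired with (as in the reference WideResNet implementation); it assumes torch and torch.nn.functional (as F) are imported alongside nn:

    def forward(self, x):
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            # dropout sits between the two 3x3 convolutions
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        # identity shortcut when shapes match, 1x1 projection otherwise
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)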
Example #2
    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = custom.Con2d_Class(3,
                                        64,
                                        kernel_size=7,
                                        stride=2,
                                        padding=3,
                                        bias=False)
        self.bn1 = custom.BN_Class(64)
        self.relu = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.classifier = custom.Linear_Class(512 * block.expansion,
                                              num_classes)

        for m in self.modules():
            if isinstance(m, custom.Con2d_Class):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, custom.BN_Class):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
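Hypothetical usage of this constructor, assuming a companion Bottleneck block with expansion = 4 (such as the one in Example #8) is defined alongside; the [3, 4, 6, 3] layer counts give a ResNet-50-style layout:

model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=1000)  # ResNet-50 layout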
Example #3
    def __init__(self, inplanes, expansion=4, growthRate=12, dropRate=0):
        super(Bottleneck, self).__init__()
        planes = expansion * growthRate
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = custom.Con2d_Class(inplanes,
                                        planes,
                                        kernel_size=1,
                                        bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = custom.Con2d_Class(planes,
                                        growthRate,
                                        kernel_size=3,
                                        padding=1,
                                        bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.dropRate = dropRate
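A sketch of the forward pass such a DenseNet bottleneck typically uses; each block concatenates growthRate new feature maps onto its input (assumes torch and torch.nn.functional as F):

    def forward(self, x):
        out = self.conv1(self.relu(self.bn1(x)))    # 1x1 bottleneck
        out = self.conv2(self.relu(self.bn2(out)))  # 3x3, emits growthRate maps
        if self.dropRate > 0:
            out = F.dropout(out, p=self.dropRate, training=self.training)
        return torch.cat((x, out), 1)               # dense connectivity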
Example #4
    def __init__(self, in_channels, out_channels, stride, cardinality,
                 widen_factor):
        """ Constructor
        Args:
            in_channels: input channel dimensionality
            out_channels: output channel dimensionality
            stride: conv stride. Replaces pooling layer.
            cardinality: num of convolution groups.
            widen_factor: factor to reduce the input dimensionality before convolution.
        """
        super(ResNeXtBottleneck, self).__init__()
        D = cardinality * out_channels // widen_factor
        self.conv_reduce = custom.Con2d_Class(in_channels,
                                              D,
                                              kernel_size=1,
                                              stride=1,
                                              padding=0,
                                              bias=False)
        self.bn_reduce = nn.BatchNorm2d(D)
        self.conv_conv = custom.Con2d_Class(D,
                                            D,
                                            kernel_size=3,
                                            stride=stride,
                                            padding=1,
                                            groups=cardinality,
                                            bias=False)
        self.bn = nn.BatchNorm2d(D)
        self.conv_expand = custom.Con2d_Class(D,
                                              out_channels,
                                              kernel_size=1,
                                              stride=1,
                                              padding=0,
                                              bias=False)
        self.bn_expand = nn.BatchNorm2d(out_channels)

        self.shortcut = nn.Sequential()
        if in_channels != out_channels:
            self.shortcut.add_module(
                'shortcut_conv',
                custom.Con2d_Class(in_channels,
                                   out_channels,
                                   kernel_size=1,
                                   stride=stride,
                                   padding=0,
                                   bias=False))
            self.shortcut.add_module('shortcut_bn',
                                     nn.BatchNorm2d(out_channels))
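A matching forward pass, sketched from the common CIFAR ResNeXt implementation (assumes torch.nn.functional as F); the shortcut is the identity unless the 1x1 projection defined above is present:

    def forward(self, x):
        out = F.relu(self.bn_reduce(self.conv_reduce(x)), inplace=True)
        out = F.relu(self.bn(self.conv_conv(out)), inplace=True)  # grouped 3x3
        out = self.bn_expand(self.conv_expand(out))
        return F.relu(self.shortcut(x) + out, inplace=True)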
Example #5
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return custom.Con2d_Class(in_planes,
                              out_planes,
                              kernel_size=3,
                              stride=stride,
                              padding=1,
                              bias=False)
Example #6
    def __init__(self, inplanes, outplanes):
        super(Transition, self).__init__()
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = custom.Con2d_Class(inplanes,
                                        outplanes,
                                        kernel_size=1,
                                        bias=False)
        self.relu = nn.ReLU(inplace=True)
Example #7
    def __init__(self,
                 depth=22,
                 block_name='BasicBlock',
                 dropRate=0,
                 num_classes=10,
                 growthRate=12,
                 compressionRate=2):
        super(DenseNet, self).__init__()

        # Model type specifies number of layers for CIFAR-10 model
        if block_name.lower() == 'basicblock':
            assert (
                depth - 4
            ) % 3 == 0, 'When using basicblock, depth should be 3n+4, e.g. 40, 100, 190, 250'
            n = (depth - 4) // 3
            block = BasicBlock
        elif block_name.lower() == 'bottleneck':
            assert (
                depth - 4
            ) % 6 == 0, 'When using bottleneck, depth should be 6n+4, e.g. 40, 100, 190, 250'
            n = (depth - 4) // 6
            block = Bottleneck
        else:
            raise ValueError('block_name should be BasicBlock or Bottleneck')

        self.growthRate = growthRate
        self.dropRate = dropRate

        # self.inplanes is a global variable used across multiple
        # helper functions
        self.inplanes = growthRate * 2
        self.conv1 = custom.Con2d_Class(3,
                                        self.inplanes,
                                        kernel_size=3,
                                        padding=1,
                                        bias=False)
        self.dense1 = self._make_denseblock(block, n)
        self.trans1 = self._make_transition(compressionRate)
        self.dense2 = self._make_denseblock(block, n)
        self.trans2 = self._make_transition(compressionRate)
        self.dense3 = self._make_denseblock(block, n)
        self.bn = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(8)
        self.classifier = custom.Linear_Class(self.inplanes, num_classes)

        # Weight initialization
        for m in self.modules():
            if isinstance(m, (custom.Linear_Class, custom.Con2d_Class)):
                init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                if m.bias is not None:
                    m.bias.data.zero_()
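Hypothetical instantiation; depth must satisfy the assertion above, e.g. a 100-layer bottleneck DenseNet for CIFAR-10:

model = DenseNet(depth=100,
                 block_name='Bottleneck',
                 growthRate=12,
                 compressionRate=2,
                 num_classes=10)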
Example #8
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = custom.Con2d_Class(inplanes,
                                        planes,
                                        kernel_size=1,
                                        bias=False)
        self.bn1 = custom.BN_Class(planes)
        self.conv2 = custom.Con2d_Class(planes,
                                        planes,
                                        kernel_size=3,
                                        stride=stride,
                                        padding=1,
                                        bias=False)
        self.bn2 = custom.BN_Class(planes)
        self.conv3 = custom.Con2d_Class(planes,
                                        planes * 4,
                                        kernel_size=1,
                                        bias=False)
        self.bn3 = custom.BN_Class(planes * 4)
        self.relu = nn.ReLU(inplace=False)
        self.downsample = downsample
        self.stride = stride
Example #9
def make_layers(cfg, batch_norm=False):
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = custom.Con2d_Class(in_channels,
                                        v,
                                        kernel_size=3,
                                        padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)
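Usage sketch with a VGG-11-style configuration list, where 'M' marks a max-pooling stage and integers are output channel counts:

cfg = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
features = make_layers(cfg, batch_norm=True)  # VGG-11 feature extractor with BN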
Example #10
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                custom.Con2d_Class(self.inplanes,
                                   planes * block.expansion,
                                   kernel_size=1,
                                   stride=stride,
                                   bias=False), )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)
Example #11
    def __init__(self,
                 cardinality,
                 depth,
                 num_classes,
                 widen_factor=4,
                 dropRate=0):
        """ Constructor
        Args:
            cardinality: number of convolution groups.
            depth: number of layers.
            num_classes: number of classes
            widen_factor: factor to adjust the channel dimensionality
        """
        super(CifarResNeXt, self).__init__()
        self.cardinality = cardinality
        self.depth = depth
        self.block_depth = (self.depth - 2) // 9
        self.widen_factor = widen_factor
        self.num_classes = num_classes
        self.output_size = 64
        self.stages = [
            64, 64 * self.widen_factor, 128 * self.widen_factor,
            256 * self.widen_factor
        ]

        self.conv_1_3x3 = custom.Con2d_Class(3, 64, 3, 1, 1, bias=False)
        self.bn_1 = nn.BatchNorm2d(64)
        self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
        self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
        self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
        self.classifier = custom.Linear_Class(self.stages[3],  # 256 * widen_factor (1024 for the default widen_factor=4)
                                              num_classes)
        init.kaiming_normal_(self.classifier.weight)

        for m in self.modules():
            if isinstance(m, (custom.Linear_Class, custom.Con2d_Class)):
                init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                if m.bias is not None:
                    m.bias.data.zero_()
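Example construction of the standard ResNeXt-29 CIFAR-10 configuration; depth must satisfy (depth - 2) % 9 == 0:

net = CifarResNeXt(cardinality=8, depth=29, num_classes=10, widen_factor=4)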
Example #12
    def __init__(self, depth, num_classes=1000, block_name='BasicBlock'):
        super(ResNet, self).__init__()
        # Model type specifies number of layers for CIFAR-10 model
        if block_name.lower() == 'basicblock':
            assert (
                depth - 2
            ) % 6 == 0, 'When using basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
            n = (depth - 2) // 6
            block = BasicBlock
        elif block_name.lower() == 'bottleneck':
            assert (
                depth - 2
            ) % 9 == 0, 'When using bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            raise ValueError('block_name should be BasicBlock or Bottleneck')

        self.inplanes = 16
        self.conv1 = custom.Con2d_Class(3,
                                        16,
                                        kernel_size=3,
                                        padding=1,
                                        bias=False)
        self.bn1 = custom.BN_Class(16)
        self.relu = nn.ReLU(inplace=False)
        self.layer1 = self._make_layer(block, 16, n)
        self.layer2 = self._make_layer(block, 32, n, stride=2)
        self.layer3 = self._make_layer(block, 64, n, stride=2)
        self.avgpool = nn.AvgPool2d(8)
        self.classifier = custom.Linear_Class(64 * block.expansion,
                                              num_classes)

        for m in self.modules():
            if isinstance(m, (custom.Linear_Class, custom.Con2d_Class)):
                init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, custom.BN_Class):
                m.weight.data.fill_(1)
                if m.bias is not None:
                    m.bias.data.zero_()
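Example construction; with the basic block, depth 56 yields n = 9 stacked blocks per stage:

model = ResNet(depth=56, num_classes=10, block_name='BasicBlock')  # ResNet-56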
Example #13
    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [
            16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor
        ]
        assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
        n = (depth - 4) // 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = custom.Con2d_Class(3,
                                        nChannels[0],
                                        kernel_size=3,
                                        stride=1,
                                        padding=1,
                                        bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1,
                                   dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2,
                                   dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2,
                                   dropRate)
        # global average pooling and classifier
        self.bn1 = custom.BN_Class(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.classifier = custom.Linear_Class(nChannels[3], num_classes)
        self.nChannels = nChannels[3]

        for m in self.modules():
            if isinstance(m, custom.Con2d_Class):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, custom.BN_Class):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, custom.Linear_Class):
                if m.bias is not None:
                    m.bias.data.zero_()
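Example construction of the common WRN-28-10 configuration (depth 28 gives n = 4 blocks per group; dropRate=0.3 is the value the original paper uses on CIFAR):

model = WideResNet(depth=28, num_classes=10, widen_factor=10, dropRate=0.3)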