# Code example #1
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), num_init_features=64,
                 norm_type='Unknown', bn_size=4, drop_rate=0, num_classes=1000, use_se=True):
        """Build a DenseNet backbone with optional squeeze-and-excitation (SE) layers.

        Args:
            growth_rate: channels each dense layer adds to its block's features.
            block_config: number of dense layers in each of the four blocks.
            num_init_features: channel count produced by the stem convolution.
            norm_type: key passed to ``get_norm`` to pick the normalization layer.
            bn_size: bottleneck multiplier forwarded to ``_DenseBlock``.
            drop_rate: dropout rate forwarded to ``_DenseBlock``.
            num_classes: output dimension of the final linear classifier.
            use_se: if True, interleave ``SEModule`` layers between stages.
        """
        super(DenseNet, self).__init__()

        # Stem: 7x7 strided conv -> norm -> ReLU -> 3x3 max-pool.
        stem = OrderedDict()
        stem['conv0'] = nn.Conv2d(3, num_init_features, kernel_size=7,
                                  stride=2, padding=3, bias=False)
        stem['norm0'] = get_norm(norm_type, num_init_features)
        stem['relu0'] = nn.ReLU(inplace=True)
        stem['pool0'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.features = nn.Sequential(stem)

        if use_se:
            # SE layer right after the stem.
            self.features.add_module("SELayer_0a", SEModule(channels=num_init_features))

        # Dense blocks, each (except the last) followed by a halving transition.
        channels = num_init_features
        for idx, layers_in_block in enumerate(block_config):
            if use_se:
                # SE layer in front of each dense block.
                self.features.add_module("SELayer_%da" % (idx + 1),
                                         SEModule(channels=channels))

            self.features.add_module(
                'denseblock%d' % (idx + 1),
                _DenseBlock(num_layers=layers_in_block, num_input_features=channels,
                            norm_type=norm_type, bn_size=bn_size,
                            growth_rate=growth_rate, drop_rate=drop_rate))
            channels += layers_in_block * growth_rate

            if idx != len(block_config) - 1:
                if use_se:
                    # SE layer in front of each transition block.
                    self.features.add_module("SELayer_%db" % (idx + 1),
                                             SEModule(channels=channels))
                self.features.add_module(
                    'transition%d' % (idx + 1),
                    _Transition(num_input_features=channels,
                                num_output_features=channels // 2,
                                norm_type=norm_type))
                channels //= 2

        # Final normalization before the classifier head.
        self.features.add_module('norm5', get_norm(norm_type, channels))

        self.classifier = nn.Linear(channels, num_classes)
        self.num_features = channels

        # Official init scheme from the torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
# Code example #2
 def __init__(self, inplanes, planes, stride=1, dilation=1):
     """Aggregated-transformation (ResNeXt-style) bottleneck with an SE block.

     Args:
         inplanes: input channel count.
         planes: output channel count of the block.
         stride: stride applied in the grouped 3x3 convolution.
         dilation: dilation (and matching padding) of the 3x3 convolution.
     """
     super(BottleneckX, self).__init__()
     cardinality = BottleneckX.cardinality
     # Width of the grouped middle stage scales with planes and cardinality.
     bottle_planes = planes * cardinality // 32
     # 1x1 reduction.
     self.conv1 = nn.Conv2d(inplanes, bottle_planes, kernel_size=1, bias=False)
     self.bn1 = BatchNorm(bottle_planes)
     # Grouped 3x3 stage carries the stride and dilation.
     self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
                            stride=stride, padding=dilation, bias=False,
                            dilation=dilation, groups=cardinality)
     self.bn2 = BatchNorm(bottle_planes)
     # 1x1 expansion back to `planes`.
     self.conv3 = nn.Conv2d(bottle_planes, planes, kernel_size=1, bias=False)
     self.bn3 = BatchNorm(planes)
     self.se_module = SEModule(planes)
     self.relu = nn.ReLU(inplace=True)
     self.stride = stride
# Code example #3
 def __init__(self, inplanes, planes, groups, reduction, stride=1,
              downsample=None, base_width=4):
     """SE-ResNeXt bottleneck: grouped 3x3 stage plus squeeze-and-excitation.

     Args:
         inplanes: input channel count.
         planes: base channel count; the block outputs ``planes * 4``.
         groups: cardinality of the grouped 3x3 convolution.
         reduction: channel-reduction ratio for the SE module.
         stride: stride of the 3x3 stage.
         downsample: optional module applied to the identity branch.
         base_width: base width used in the ResNeXt group-width formula.
     """
     super(SEResNeXtBottleneck, self).__init__()
     # ResNeXt group-width formula.
     width = math.floor(planes * (base_width / 64)) * groups
     self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False, stride=1)
     self.bn1 = nn.BatchNorm2d(width)
     self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,
                            padding=1, groups=groups, bias=False)
     self.bn2 = nn.BatchNorm2d(width)
     self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)
     self.bn3 = nn.BatchNorm2d(planes * 4)
     self.relu = nn.ReLU(inplace=True)
     self.se_module = SEModule(planes * 4, reduction=reduction)
     self.downsample = downsample
     self.stride = stride
# Code example #4
 def __init__(self, inplanes, planes, groups, reduction, stride=1,
              downsample=None):
     """SE-ResNet bottleneck with a squeeze-and-excitation module.

     NOTE(review): the stride is applied on the 1x1 `conv1` rather than on the
     3x3 `conv2` — this matches the original SENet implementation's deviation
     from vanilla ResNet; confirm before "fixing".

     Args:
         inplanes: input channel count.
         planes: base channel count; the block outputs ``planes * 4``.
         groups: group count of the 3x3 convolution.
         reduction: channel-reduction ratio for the SE module.
         stride: stride applied in `conv1`.
         downsample: optional module applied to the identity branch.
     """
     super(SEResNetBottleneck, self).__init__()
     # 1x1 reduction carries the stride (see NOTE above).
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False,
                            stride=stride)
     self.bn1 = nn.BatchNorm2d(planes)
     # Grouped 3x3 stage, stride 1.
     self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1,
                            groups=groups, bias=False)
     self.bn2 = nn.BatchNorm2d(planes)
     # 1x1 expansion to 4x planes.
     self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
     self.bn3 = nn.BatchNorm2d(planes * 4)
     self.relu = nn.ReLU(inplace=True)
     self.se_module = SEModule(planes * 4, reduction=reduction)
     self.downsample = downsample
     self.stride = stride
# Code example #5
    def __init__(self, inplanes, planes, stride=1, downsample=None, radix=1, cardinality=1,
                 bottleneck_width=64, avd=False, avd_first=False, dilation=1, is_first=False,
                 rectified_conv=False, rectify_avg=False, norm_layer=None, dropblock_prob=0.0,
                 last_gamma=False, use_se=False):
        """ResNeSt-style bottleneck block.

        Args:
            inplanes: input channel count.
            planes: base channel count; the block outputs ``planes * 4``.
            stride: stride of the 3x3 stage (moved into an avg-pool when avd fires).
            downsample: optional module applied to the identity branch.
            radix: split count for split-attention; any value >= 1 selects
                ``SplAtConv2d`` for the 3x3 stage.
            cardinality: group count of the 3x3 stage.
            bottleneck_width: base width used to compute the group width.
            avd: enable average-pool downsampling (only when stride > 1 or is_first).
            avd_first: stored flag — presumably controls whether the avg-pool runs
                before or after conv2 in forward(); confirm against forward().
            dilation: dilation (and padding) of the 3x3 stage.
            is_first: marks the first block of a stage; enables avd even at stride 1.
            rectified_conv: use ``RFConv2d`` for the 3x3 stage — note this branch is
                only reachable when ``radix < 1`` (the radix branch takes precedence).
            rectify_avg: ``average_mode`` flag forwarded to the rectified conv.
            norm_layer: normalization-layer constructor (required — no default here).
            dropblock_prob: if > 0, create DropBlock2D regularizers.
            last_gamma: zero-initialize the final norm's weight (residual starts
                as identity).
            use_se: append an ``SEModule`` after the expansion conv.
        """
        super(Bottleneck, self).__init__()
        # ResNeXt-style group width.
        group_width = int(planes * (bottleneck_width / 64.)) * cardinality
        self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False)
        self.bn1 = norm_layer(group_width)
        self.dropblock_prob = dropblock_prob
        self.radix = radix
        # avd only activates on downsampling blocks or the first block of a stage.
        self.avd = avd and (stride > 1 or is_first)
        self.avd_first = avd_first

        if self.avd:
            # Downsample via avg-pool instead of a strided conv; conv2 then
            # runs with stride 1 (the local `stride` is rebound below).
            self.avd_layer = nn.AvgPool2d(3, stride, padding=1)
            stride = 1

        if dropblock_prob > 0.0:
            self.dropblock1 = DropBlock2D(dropblock_prob, 3)
            # When radix > 1, dropblock_prob is passed into SplAtConv2d below,
            # so a separate dropblock2 is only needed for radix == 1.
            if radix == 1:
                self.dropblock2 = DropBlock2D(dropblock_prob, 3)
            self.dropblock3 = DropBlock2D(dropblock_prob, 3)

        # 3x3 stage: split-attention conv when radix >= 1 (the default),
        # otherwise a rectified or plain grouped conv.
        if radix >= 1:
            self.conv2 = SplAtConv2d(group_width, group_width, kernel_size=3, stride=stride,
                                     padding=dilation, dilation=dilation, groups=cardinality,
                                     bias=False, radix=radix, rectify=rectified_conv,
                                     rectify_avg=rectify_avg, norm_layer=norm_layer,
                                     dropblock_prob=dropblock_prob)
        elif rectified_conv:
            # Lazy import: rfconv is only needed for this configuration.
            from rfconv import RFConv2d
            self.conv2 = RFConv2d(group_width, group_width, kernel_size=3, stride=stride,
                                  padding=dilation, dilation=dilation, groups=cardinality,
                                  bias=False, average_mode=rectify_avg)
            self.bn2 = norm_layer(group_width)
        else:
            self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride,
                                   padding=dilation, dilation=dilation, groups=cardinality,
                                   bias=False)
            self.bn2 = norm_layer(group_width)

        # 1x1 expansion to 4x planes.
        self.conv3 = nn.Conv2d(group_width, planes * 4, kernel_size=1, bias=False)
        self.bn3 = norm_layer(planes * 4)

        if last_gamma:
            # Zero gamma on the last norm so the residual branch starts at zero.
            from torch.nn.init import zeros_
            zeros_(self.bn3.weight)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.dilation = dilation
        # NOTE: records the (possibly rebound) local stride — 1 when avd fired.
        self.stride = stride
        self.use_se = use_se

        if use_se:
            self.se = SEModule(planes * 4)