Example #1
    def __init__(self,
                 num_input_features,
                 growth_rate,
                 bn_size,
                 drop_rate,
                 norm_type='Unknown'):
        super(_DenseLayer, self).__init__()
        # 1x1 bottleneck conv expands to bn_size * growth_rate channels
        self.add_module('norm1', get_norm(norm_type, num_input_features))
        self.add_module('relu1', nn.ReLU(inplace=True))
        self.add_module(
            'conv1',
            nn.Conv2d(num_input_features,
                      bn_size * growth_rate,
                      kernel_size=1,
                      stride=1,
                      bias=False))
        # 3x3 conv produces growth_rate new feature maps
        self.add_module('norm2', get_norm(norm_type, bn_size * growth_rate))
        self.add_module('relu2', nn.ReLU(inplace=True))
        self.add_module(
            'conv2',
            nn.Conv2d(bn_size * growth_rate,
                      growth_rate,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False))
        self.drop_rate = drop_rate
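
All of these examples delegate normalization to a get_norm helper that is
not shown on this page. A minimal sketch of such a helper, assuming it
dispatches on a norm_type string; the 'BN'/'GN'/'IN' names, the 32-group
GroupNorm default, and the affine InstanceNorm are guesses inferred from
the init loop in Example #6, not the original implementation:

import torch.nn as nn

def get_norm(norm_type, num_features, **kwargs):
    # Hypothetical dispatcher; the real helper may use other type
    # strings or defaults. kwargs passes through extras such as eps.
    if norm_type == 'BN':
        return nn.BatchNorm2d(num_features, **kwargs)
    elif norm_type == 'GN':
        return nn.GroupNorm(32, num_features, **kwargs)  # 32 groups assumed
    elif norm_type == 'IN':
        # affine=True so the init loop in Example #6 can set weight/bias
        return nn.InstanceNorm2d(num_features, affine=True, **kwargs)
    raise ValueError('Unsupported norm type: %s' % norm_type)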
Example #2
    def __init__(self,
                 in_channels,
                 out_channels,
                 norm_type='Unknown',
                 **kwargs):
        super(BasicConv2d, self).__init__()
        # **kwargs forwards kernel_size, stride, padding, etc. to the conv
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.norm = get_norm(norm_type, out_channels, eps=0.001)
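
Only the constructor is shown here. An assumed completion of the forward
pass, mirroring torchvision's Inception BasicConv2d (requires
torch.nn.functional imported as F; not part of the original snippet):

    def forward(self, x):
        x = self.conv(x)
        x = self.norm(x)
        return F.relu(x, inplace=True)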
Example #3
    def __init__(self,
                 in_ch,
                 out_ch,
                 kernel_size=3,
                 stride=1,
                 padding=0,
                 bias=True,
                 norm_type='Unknown'):
        super(Conv2dNormRelu, self).__init__()

        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size, stride, padding, bias=bias),
            get_norm(norm_type, out_ch), nn.ReLU(inplace=True))
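
Since the three modules are wrapped in a single nn.Sequential, the
forward pass presumably just applies it; a sketch plus a hypothetical
usage (the 'BN' type string assumes the get_norm sketch above):

    def forward(self, x):
        return self.conv(x)

# A 3x3 block that preserves spatial size:
# block = Conv2dNormRelu(3, 64, kernel_size=3, padding=1, norm_type='BN')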
Example #4
    def __init__(self,
                 num_input_features,
                 num_output_features,
                 norm_type='Unknown'):
        super(_Transition, self).__init__()
        self.add_module('norm', get_norm(norm_type, num_input_features))
        self.add_module('relu', nn.ReLU(inplace=True))
        # 1x1 conv compresses the channel count between dense blocks
        self.add_module(
            'conv',
            nn.Conv2d(num_input_features,
                      num_output_features,
                      kernel_size=1,
                      stride=1,
                      bias=False))
        # 2x2 average pooling halves the spatial resolution
        self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
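
Like torchvision's version, _Transition presumably subclasses
nn.Sequential, so the modules run in the order they were added and no
explicit forward is needed. A quick shape check (the 'BN' type string is
again an assumption):

import torch

trans = _Transition(num_input_features=256,
                    num_output_features=128,
                    norm_type='BN')
out = trans(torch.randn(2, 256, 32, 32))
print(out.shape)  # torch.Size([2, 128, 16, 16]): channels and H/W halved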
Example #5
def make_layers(cfg, batch_norm=False, norm_type='Unknown'):
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':  # 'M' entries insert a 2x2 max-pool
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [
                    conv2d,
                    get_norm(norm_type, v),
                    nn.ReLU(inplace=True)
                ]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)
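
A usage sketch with torchvision's vgg11-style configuration (the 'BN'
type string assumes the get_norm sketch above):

# Numbers are conv output channels; 'M' inserts a 2x2 max-pool.
cfg = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
features = make_layers(cfg, batch_norm=True, norm_type='BN')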
Example #6
    def __init__(self,
                 growth_rate=32,
                 block_config=(6, 12, 24, 16),
                 norm_type='Unknown',
                 num_init_features=64,
                 bn_size=4,
                 drop_rate=0,
                 num_classes=1000):

        super(DenseNet, self).__init__()

        # First convolution
        self.features = nn.Sequential(
            OrderedDict([
                ('conv0',
                 nn.Conv2d(3,
                           num_init_features,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),
                ('norm0', get_norm(norm_type, num_init_features)),
                ('relu0', nn.ReLU(inplace=True)),
                ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(
                num_layers=num_layers,
                num_input_features=num_features,
                norm_type=norm_type,
                bn_size=bn_size,
                growth_rate=growth_rate,
                drop_rate=drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=num_features // 2,
                                    norm_type=norm_type)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final norm layer (a batch norm in the stock torchvision DenseNet)
        self.features.add_module('norm5', get_norm(norm_type, num_features))

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)
        self.num_features = num_features

        # Official init from the torch repo, extended to cover the
        # norm layers get_norm may return.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm,
                                nn.InstanceNorm2d)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
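
The snippet stops at the initialization loop. In torchvision's DenseNet
the forward pass looks like the following, which presumably carries over
here unchanged (requires torch and torch.nn.functional as F):

    def forward(self, x):
        features = self.features(x)
        out = F.relu(features, inplace=True)
        out = F.adaptive_avg_pool2d(out, (1, 1))
        out = torch.flatten(out, 1)
        return self.classifier(out)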