Example 1
    def __init__(self, config, anchors, num_cls, growth_rate=32, block_config=(6, 12, 24, 16), num_init_features=64, bn_size=4, drop_rate=0):
        nn.Module.__init__(self)

        # First convolution
        self.layers = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
            self.layers.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
                self.layers.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.layers.add_module('norm5', nn.BatchNorm2d(num_features))

        self.layers.add_module('conv', nn.Conv2d(num_features, model.output_channels(len(anchors), num_cls), 1))
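
Every example ends with a 1×1 convolution whose output width comes from model.output_channels(len(anchors), num_cls). That helper is never shown in these snippets; a minimal sketch of what it presumably computes, assuming the usual YOLO encoding of 4 box offsets plus 1 objectness score per anchor, followed by the per-class confidences:

# Hypothetical sketch of model.output_channels (the helper is not part of the snippets).
# Assumes the standard YOLO layout: per anchor, 4 box offsets + 1 objectness score
# + num_cls class confidences.
def output_channels(num_anchors, num_cls):
    return num_anchors * (5 + num_cls)

assert output_channels(5, 20) == 125  # e.g. 5 anchors, 20 classes -> 125 channels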
Example 2
 def __init__(self, config, anchors, num_cls):
     nn.Module.__init__(self)
     self.layers = nn.Sequential(
         BasicConv2d(3, 32, kernel_size=3, stride=2),
         BasicConv2d(32, 32, kernel_size=3, stride=1),
         BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1),
         Mixed_3a(),
         Mixed_4a(),
         Mixed_5a(),
         Inception_A(),
         Inception_A(),
         Inception_A(),
         Inception_A(),
         Reduction_A(),  # Mixed_6a
         Inception_B(),
         Inception_B(),
         Inception_B(),
         Inception_B(),
         Inception_B(),
         Inception_B(),
         Inception_B(),
         Reduction_B(),  # Mixed_7a
         Inception_C(),
         Inception_C(),
         Inception_C(),
         nn.Conv2d(1536, model.output_channels(len(anchors), num_cls), 1),
     )
Example 3
    def __init__(self, config_channels, anchors, num_cls, growth_rate=32, block_config=(6, 12, 24, 16), num_init_features=64, bn_size=4, drop_rate=0):
        nn.Module.__init__(self)

        # First convolution
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))
        self.features.add_module('conv', nn.Conv2d(num_features, model.output_channels(len(anchors), num_cls), 1))

        # init
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example 4
    def __init__(self, config_channels, anchors, num_cls):
        nn.Module.__init__(self)

        self.layers = nn.Sequential(
            conv_bn(  3,  32, 2),
            conv_unit(32, 64, 1),
            conv_unit(64, 128, 2),
            conv_unit(128, 128, 1),
            conv_unit(128, 256, 2),
            conv_unit(256, 256, 1),
            conv_unit(256, 512, 2),
            conv_unit(512, 512, 1),
            conv_unit(512, 512, 1),
            conv_unit(512, 512, 1),
            conv_unit(512, 512, 1),
            conv_unit(512, 512, 1),
            conv_unit(512, 1024, 2),
            conv_unit(1024, 1024, 1),
            nn.Conv2d(1024, model.output_channels(len(anchors), num_cls), 1)
        )

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example 5
    def __init__(self, config_channels, anchors, num_cls):
        nn.Module.__init__(self)
        layers = []
        layers.append(conv_bn(config_channels.channels, config_channels(32, 'layers.%d.conv.weight' % len(layers)), 2))
        layers.append(conv_unit(config_channels.channels, config_channels(64, 'layers.%d.pw.conv.weight' % len(layers)), 1))
        layers.append(conv_unit(config_channels.channels, config_channels(128, 'layers.%d.pw.conv.weight' % len(layers)), 2))
        layers.append(conv_unit(config_channels.channels, config_channels(128, 'layers.%d.pw.conv.weight' % len(layers)), 1))
        layers.append(conv_unit(config_channels.channels, config_channels(256, 'layers.%d.pw.conv.weight' % len(layers)), 2))
        layers.append(conv_unit(config_channels.channels, config_channels(256, 'layers.%d.pw.conv.weight' % len(layers)), 1))
        layers.append(conv_unit(config_channels.channels, config_channels(512, 'layers.%d.pw.conv.weight' % len(layers)), 2))
        layers.append(conv_unit(config_channels.channels, config_channels(512, 'layers.%d.pw.conv.weight' % len(layers)), 1))
        layers.append(conv_unit(config_channels.channels, config_channels(512, 'layers.%d.pw.conv.weight' % len(layers)), 1))
        layers.append(conv_unit(config_channels.channels, config_channels(512, 'layers.%d.pw.conv.weight' % len(layers)), 1))
        layers.append(conv_unit(config_channels.channels, config_channels(512, 'layers.%d.pw.conv.weight' % len(layers)), 1))
        layers.append(conv_unit(config_channels.channels, config_channels(512, 'layers.%d.pw.conv.weight' % len(layers)), 1))
        layers.append(conv_unit(config_channels.channels, config_channels(1024, 'layers.%d.pw.conv.weight' % len(layers)), 2))
        layers.append(conv_unit(config_channels.channels, config_channels(1024, 'layers.%d.pw.conv.weight' % len(layers)), 1))
        layers.append(nn.Conv2d(config_channels.channels, model.output_channels(len(anchors), num_cls), 1))
        self.layers = nn.Sequential(*layers)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
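
In this and several later examples, the input width of every layer is read from config_channels.channels and the output width is requested through config_channels(out_channels, parameter_name). That channel-tracking helper is not included in the snippets; a minimal sketch of how such an object might behave, assuming it simply records the most recently requested width (the parameter name would let a real implementation override widths per layer, e.g. for channel pruning):

class ConfigChannels(object):
    """Hypothetical sketch of the channel-tracking helper used by these examples."""
    def __init__(self, config, channels=3):
        self.config = config      # e.g. a configparser.ConfigParser instance
        self.channels = channels  # width produced by the previous layer (starts at RGB)

    def __call__(self, channels, name):
        # A real implementation may override `channels` based on `name`;
        # here we just record and return the requested width.
        self.channels = channels
        return channels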
Example 6
    def __init__(self, config_channels, anchors, num_cls):
        nn.Module.__init__(self)
        channels = 16
        layers = []

        for _ in range(5):
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels, 'layers.%d.conv.weight' % len(layers)), 3, same_padding=True))
            layers.append(nn.MaxPool2d(kernel_size=2))
            channels *= 2
        layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels, 'layers.%d.conv.weight' % len(layers)), 3, same_padding=True))
        layers.append(nn.ConstantPad2d((0, 1, 0, 1), float(np.finfo(np.float32).min)))
        layers.append(nn.MaxPool2d(kernel_size=2, stride=1))
        channels *= 2
        for _ in range(2):
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels, 'layers.%d.conv.weight' % len(layers)), 3, same_padding=True))
        layers.append(Conv2d(config_channels.channels, model.output_channels(len(anchors), num_cls), 1, act=False))
        self.layers = nn.Sequential(*layers)

        # init
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
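
The ConstantPad2d((0, 1, 0, 1), float32-min) followed by a stride-1 max pool is the tiny-YOLO trick for appending a sixth pooling stage without halving the grid again: padding with the most negative float guarantees the padded row and column can never win the max, and the stride-1 pool preserves the spatial size. A quick shape check of that pair of layers:

import numpy as np
import torch
import torch.nn as nn

x = torch.randn(1, 8, 13, 13)
pad = nn.ConstantPad2d((0, 1, 0, 1), float(np.finfo(np.float32).min))
pool = nn.MaxPool2d(kernel_size=2, stride=1)
y = pool(pad(x))
assert y.shape == x.shape  # 13x13 -> padded 14x14 -> pooled back to 13x13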
Example 7
 def __init__(self, config_channels, anchors, num_cls, ratio=1):
     nn.Module.__init__(self)
     features = []
     bn = config_channels.config.getboolean('batch_norm', 'enable')
     features.append(Conv2d(config_channels.channels, config_channels(32, 'features.%d.conv.weight' % len(features)), kernel_size=3, stride=2, bn=bn))
     features.append(Conv2d(config_channels.channels, config_channels(32, 'features.%d.conv.weight' % len(features)), kernel_size=3, stride=1, bn=bn))
     features.append(Conv2d(config_channels.channels, config_channels(64, 'features.%d.conv.weight' % len(features)), kernel_size=3, stride=1, padding=1, bn=bn))
     features.append(Mixed_3a(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio))
     features.append(Mixed_4a(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio))
     features.append(Mixed_5a(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio))
     features.append(Inception_A(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio))
     features.append(Inception_A(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio))
     features.append(Inception_A(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio))
     features.append(Inception_A(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio))
     features.append(Reduction_A(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio)) # Mixed_6a
     features.append(Inception_B(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio))
     features.append(Inception_B(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio))
     features.append(Inception_B(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio))
     features.append(Inception_B(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio))
     features.append(Inception_B(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio))
     features.append(Inception_B(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio))
     features.append(Inception_B(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio))
     features.append(Reduction_B(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio)) # Mixed_7a
     features.append(Inception_C(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio))
     features.append(Inception_C(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio))
     features.append(Inception_C(config_channels, 'features.%d' % len(features), bn=bn, ratio=ratio))
     features.append(nn.Conv2d(config_channels.channels, model.output_channels(len(anchors), num_cls), 1))
     self.features = nn.Sequential(*features)
     self.init(config_channels)
Example 8
    def __init__(self, config, anchors, num_cls, transform_input=False):
        nn.Module.__init__(self)
        self.transform_input = transform_input
        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
        self.Mixed_5b = InceptionA(192, pool_features=32)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        self.Mixed_5d = InceptionA(288, pool_features=64)
        self.Mixed_6a = InceptionB(288)
        self.Mixed_6b = InceptionC(768, channels_7x7=128)
        self.Mixed_6c = InceptionC(768, channels_7x7=160)
        self.Mixed_6d = InceptionC(768, channels_7x7=160)
        self.Mixed_6e = InceptionC(768, channels_7x7=192)
        # aux_logits
        self.Mixed_7a = InceptionD(768)
        self.Mixed_7b = InceptionE(1280)
        self.Mixed_7c = InceptionE(2048)
        self.conv = nn.Conv2d(2048, model.output_channels(len(anchors), num_cls), 1)

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                X = stats.truncnorm(-2, 2, scale=stddev)
                values = torch.Tensor(X.rvs(m.weight.data.numel()))
                m.weight.data.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
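
The truncated-normal initialisation above goes through scipy.stats.truncnorm, mirroring the original torchvision Inception v3 code. Current PyTorch can do the same without SciPy; a sketch of an equivalent loop, assuming the same ±2σ truncation (nn.init.trunc_normal_ takes its bounds in absolute units, hence the scaling by stddev):

import torch.nn as nn

def init_truncated_normal(module):
    for m in module.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            stddev = getattr(m, 'stddev', 0.1)
            nn.init.trunc_normal_(m.weight, std=stddev, a=-2 * stddev, b=2 * stddev)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.ones_(m.weight)
            nn.init.zeros_(m.bias)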
Example 9
    def __init__(self, config, anchors, num_cls, block, layers):
        self.inplanes = 64
        nn.Module.__init__(self)
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.conv = nn.Conv2d(512,
                              model.output_channels(len(anchors), num_cls), 1)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
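
_make_layer is not part of this snippet; it follows the standard torchvision ResNet pattern of stacking layers[i] residual blocks and adding a 1×1 downsample branch whenever the stride or channel count changes. A minimal sketch, assuming block exposes the usual expansion attribute:

import torch.nn as nn

def _make_layer(self, block, planes, blocks, stride=1):
    """Sketch of the torchvision-style helper assumed by the ResNet examples."""
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            nn.Conv2d(self.inplanes, planes * block.expansion,
                      kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes * block.expansion),
        )
    layers = [block(self.inplanes, planes, stride, downsample)]
    self.inplanes = planes * block.expansion
    for _ in range(1, blocks):
        layers.append(block(self.inplanes, planes))
    return nn.Sequential(*layers)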
Example 10
    def __init__(self, config_channels, anchors, num_cls, stride=2):
        nn.Module.__init__(self)
        self.stride = stride
        channels = 32
        layers = []

        # layers1
        for _ in range(2):
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels, 'layers1.%d.conv.weight' % len(layers)), 3, same_padding=True))
            layers.append(nn.MaxPool2d(kernel_size=2))
            channels *= 2
        # down 4
        for _ in range(2):
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels, 'layers1.%d.conv.weight' % len(layers)), 3, same_padding=True))
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels // 2, 'layers1.%d.conv.weight' % len(layers)), 1))
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels, 'layers1.%d.conv.weight' % len(layers)), 3, same_padding=True))
            layers.append(nn.MaxPool2d(kernel_size=2))
            channels *= 2
        # down 16
        for _ in range(2):
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels, 'layers1.%d.conv.weight' % len(layers)), 3, same_padding=True))
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels // 2, 'layers1.%d.conv.weight' % len(layers)), 1))
        layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels, 'layers1.%d.conv.weight' % len(layers)), 3, same_padding=True))
        self.layers1 = nn.Sequential(*layers)

        # layers2
        layers = []
        layers.append(nn.MaxPool2d(kernel_size=2))
        channels *= 2
        # down 32
        for _ in range(2):
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels, 'layers2.%d.conv.weight' % len(layers)), 3, same_padding=True))
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels // 2, 'layers2.%d.conv.weight' % len(layers)), 1))
        for _ in range(3):
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels, 'layers2.%d.conv.weight' % len(layers)), 3, same_padding=True))
        self.layers2 = nn.Sequential(*layers)

        self.passthrough = Conv2d_BatchNorm(self.layers1[-1].conv.weight.size(0), config_channels(64, 'passthrough.conv.weight'), 1)

        # layers3
        layers = []
        layers.append(Conv2d_BatchNorm(self.passthrough.conv.weight.size(0) * self.stride * self.stride + self.layers2[-1].conv.weight.size(0), config_channels(1024, 'layers3.%d.conv.weight' % len(layers)), 3, same_padding=True))
        layers.append(Conv2d(config_channels.channels, model.output_channels(len(anchors), num_cls), 1, act=False))
        self.layers3 = nn.Sequential(*layers)

        # init
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
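
The input width of layers3 is passthrough_channels * stride * stride + layers2_channels because, as in YOLOv2, the passthrough feature map is reorganised (space-to-depth) before being concatenated with the deeper layers2 output. The forward pass is not shown in the snippet; a hedged sketch of that reorg step:

import torch

def reorg(x, stride=2):
    """Space-to-depth: trade spatial resolution for channels (YOLOv2 passthrough)."""
    n, c, h, w = x.size()
    x = x.view(n, c, h // stride, stride, w // stride, stride)
    x = x.permute(0, 1, 3, 5, 2, 4).contiguous()
    return x.view(n, c * stride * stride, h // stride, w // stride)

# Presumed use inside forward():
#   y1 = self.layers1(x)                     # stride-16 features
#   y2 = self.layers2(y1)                    # stride-32 features
#   p = reorg(self.passthrough(y1), stride)  # 64 * 4 = 256 extra channels
#   y = self.layers3(torch.cat([p, y2], dim=1))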
Example 11
    def __init__(self, config_channels, anchors, num_cls, stride=2):
        nn.Module.__init__(self)
        self.stride = stride
        channels = 32
        layers = []

        # layers1
        for _ in range(2):
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels, 'layers1.%d.conv.weight' % len(layers)), 3, same_padding=True))
            layers.append(nn.MaxPool2d(kernel_size=2))
            channels *= 2
        # down 4
        for _ in range(2):
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels, 'layers1.%d.conv.weight' % len(layers)), 3, same_padding=True))
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels // 2, 'layers1.%d.conv.weight' % len(layers)), 1))
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels, 'layers1.%d.conv.weight' % len(layers)), 3, same_padding=True))
            layers.append(nn.MaxPool2d(kernel_size=2))
            channels *= 2
        # down 16
        for _ in range(2):
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels, 'layers1.%d.conv.weight' % len(layers)), 3, same_padding=True))
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels // 2, 'layers1.%d.conv.weight' % len(layers)), 1))
        layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels, 'layers1.%d.conv.weight' % len(layers)), 3, same_padding=True))
        self.layers1 = nn.Sequential(*layers)

        # layers2
        layers = []
        layers.append(nn.MaxPool2d(kernel_size=2))
        channels *= 2
        # down 32
        for _ in range(2):
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels, 'layers2.%d.conv.weight' % len(layers)), 3, same_padding=True))
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels // 2, 'layers2.%d.conv.weight' % len(layers)), 1))
        for _ in range(3):
            layers.append(Conv2d_BatchNorm(config_channels.channels, config_channels(channels, 'layers2.%d.conv.weight' % len(layers)), 3, same_padding=True))
        self.layers2 = nn.Sequential(*layers)

        self.passthrough = Conv2d_BatchNorm(self.layers1[-1].conv.weight.size(0), config_channels(64, 'passthrough.conv.weight'), 1)

        # layers3
        layers = []
        layers.append(Conv2d_BatchNorm(self.passthrough.conv.weight.size(0) * self.stride * self.stride + self.layers2[-1].conv.weight.size(0), config_channels(1024, 'layers3.%d.conv.weight' % len(layers)), 3, same_padding=True))
        layers.append(Conv2d(config_channels.channels, model.output_channels(len(anchors), num_cls), 1, act=False))
        self.layers3 = nn.Sequential(*layers)

        # init
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example 12
    def __init__(self, config_channels, anchors, num_cls):
        nn.Module.__init__(self)
        channels = 16
        layers = []

        for _ in range(5):
            layers.append(
                Conv2d_BatchNorm(config_channels.channels,
                                 config_channels(
                                     channels,
                                     'layers.%d.conv.weight' % len(layers)),
                                 3,
                                 same_padding=True))
            layers.append(nn.MaxPool2d(kernel_size=2))
            channels *= 2
        layers.append(
            Conv2d_BatchNorm(config_channels.channels,
                             config_channels(
                                 channels,
                                 'layers.%d.conv.weight' % len(layers)),
                             3,
                             same_padding=True))
        layers.append(
            nn.ConstantPad2d((0, 1, 0, 1), float(np.finfo(np.float32).min)))
        layers.append(nn.MaxPool2d(kernel_size=2, stride=1))
        channels *= 2
        for _ in range(2):
            layers.append(
                Conv2d_BatchNorm(config_channels.channels,
                                 config_channels(
                                     channels,
                                     'layers.%d.conv.weight' % len(layers)),
                                 3,
                                 same_padding=True))
        layers.append(
            Conv2d(config_channels.channels,
                   model.output_channels(len(anchors), num_cls),
                   1,
                   act=False))
        self.layers = nn.Sequential(*layers)

        # init
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example 13
    def __init__(self, config, anchors, num_cls):
        nn.Module.__init__(self)

        self.layers = nn.Sequential(
            conv_bn(3, 32, 2), conv_unit(32, 64, 1), conv_unit(64, 128, 2),
            conv_unit(128, 128, 1), conv_unit(128, 256, 2),
            conv_unit(256, 256, 1), conv_unit(256, 512, 2),
            conv_unit(512, 512, 1), conv_unit(512, 512, 1),
            conv_unit(512, 512, 1), conv_unit(512, 512, 1),
            conv_unit(512, 512, 1), conv_unit(512, 1024, 2),
            conv_unit(1024, 1024, 1),
            nn.Conv2d(1024, model.output_channels(len(anchors), num_cls), 1))

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
Example 14
    def __init__(self, config, anchors, num_cls):
        nn.Module.__init__(self)
        self.features = nn.Sequential(
            BasicConv2d(3, 32, kernel_size=3, stride=2),
            BasicConv2d(32, 32, kernel_size=3, stride=1),
            BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1),
            Mixed_3a(),
            Mixed_4a(),
            Mixed_5a(),
            Inception_A(),
            Inception_A(),
            Inception_A(),
            Inception_A(),
            Reduction_A(),  # Mixed_6a
            Inception_B(),
            Inception_B(),
            Inception_B(),
            Inception_B(),
            Inception_B(),
            Inception_B(),
            Inception_B(),
            Reduction_B(),  # Mixed_7a
            Inception_C(),
            Inception_C(),
            Inception_C(),
            nn.Conv2d(1536, model.output_channels(len(anchors), num_cls), 1),
        )

        gamma = config.getboolean('batch_norm', 'gamma')
        beta = config.getboolean('batch_norm', 'beta')
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
                m.weight.requires_grad = gamma
                m.bias.requires_grad = beta

        if config.getboolean('model', 'pretrained'):
            settings = pretrained_settings['inceptionv4'][config.get('inception4', 'pretrained')]
            logging.info('use pretrained model: ' + str(settings))
            state_dict = self.state_dict()
            for key, value in torch.utils.model_zoo.load_url(settings['url']).items():
                if key in state_dict:
                    state_dict[key] = value
            self.load_state_dict(state_dict)
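
The key-filtering loop above copies only the checkpoint entries whose names already exist in the detector, since the final 1×1 conv differs from the classification head of the pretrained model. In current PyTorch roughly the same effect can be had with strict=False; a hedged sketch of the equivalent inside __init__, also guarding against shape mismatches (assumes the same settings['url'] as above):

import torch

checkpoint = torch.hub.load_state_dict_from_url(settings['url'])
own = self.state_dict()
# keep only entries that exist in this model with a matching shape
checkpoint = {k: v for k, v in checkpoint.items()
              if k in own and v.shape == own[k].shape}
self.load_state_dict(checkpoint, strict=False)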
Example 15
    def __init__(self, config_channels, anchors, num_cls, stride=2, ratio=1):
        nn.Module.__init__(self)
        self.stride = stride
        channels = int(32 * ratio)
        layers = []

        bn = config_channels.config.getboolean('batch_norm', 'enable')
        # layers1
        for _ in range(2):
            layers.append(Conv2d(config_channels.channels, config_channels(channels, 'layers1.%d.conv.weight' % len(layers)), 3, bn=bn, padding=True))
            layers.append(nn.MaxPool2d(kernel_size=2))
            channels *= 2
        # down 4
        for _ in range(2):
            layers.append(Conv2d(config_channels.channels, config_channels(channels, 'layers1.%d.conv.weight' % len(layers)), 3, bn=bn, padding=True))
            layers.append(Conv2d(config_channels.channels, config_channels(channels // 2, 'layers1.%d.conv.weight' % len(layers)), 1, bn=bn))
            layers.append(Conv2d(config_channels.channels, config_channels(channels, 'layers1.%d.conv.weight' % len(layers)), 3, bn=bn, padding=True))
            layers.append(nn.MaxPool2d(kernel_size=2))
            channels *= 2
        # down 16
        for _ in range(2):
            layers.append(Conv2d(config_channels.channels, config_channels(channels, 'layers1.%d.conv.weight' % len(layers)), 3, bn=bn, padding=True))
            layers.append(Conv2d(config_channels.channels, config_channels(channels // 2, 'layers1.%d.conv.weight' % len(layers)), 1, bn=bn))
        layers.append(Conv2d(config_channels.channels, config_channels(channels, 'layers1.%d.conv.weight' % len(layers)), 3, bn=bn, padding=True))
        self.layers1 = nn.Sequential(*layers)

        # layers2
        layers = []
        layers.append(nn.MaxPool2d(kernel_size=2))
        channels *= 2
        # down 32
        for _ in range(2):
            layers.append(Conv2d(config_channels.channels, config_channels(channels, 'layers2.%d.conv.weight' % len(layers)), 3, bn=bn, padding=True))
            layers.append(Conv2d(config_channels.channels, config_channels(channels // 2, 'layers2.%d.conv.weight' % len(layers)), 1, bn=bn))
        for _ in range(3):
            layers.append(Conv2d(config_channels.channels, config_channels(channels, 'layers2.%d.conv.weight' % len(layers)), 3, bn=bn, padding=True))
        self.layers2 = nn.Sequential(*layers)

        self.passthrough = Conv2d(self.layers1[-1].conv.weight.size(0), config_channels(int(64 * ratio), 'passthrough.conv.weight'), 1, bn=bn)

        # layers3
        layers = []
        layers.append(Conv2d(self.passthrough.conv.weight.size(0) * self.stride * self.stride + self.layers2[-1].conv.weight.size(0), config_channels(int(1024 * ratio), 'layers3.%d.conv.weight' % len(layers)), 3, bn=bn, padding=True))
        layers.append(Conv2d(config_channels.channels, model.output_channels(len(anchors), num_cls), 1, bn=False, act=False))
        self.layers3 = nn.Sequential(*layers)

        self.init()
Example 16
    def __init__(self, config_channels, anchors, num_cls):
        nn.Module.__init__(self)
        self.features = nn.Sequential(
            BasicConv2d(3, 32, kernel_size=3, stride=2),
            BasicConv2d(32, 32, kernel_size=3, stride=1),
            BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1),
            Mixed_3a(),
            Mixed_4a(),
            Mixed_5a(),
            Inception_A(),
            Inception_A(),
            Inception_A(),
            Inception_A(),
            Reduction_A(),  # Mixed_6a
            Inception_B(),
            Inception_B(),
            Inception_B(),
            Inception_B(),
            Inception_B(),
            Inception_B(),
            Inception_B(),
            Reduction_B(),  # Mixed_7a
            Inception_C(),
            Inception_C(),
            Inception_C(),
            nn.Conv2d(1536, model.output_channels(len(anchors), num_cls), 1),
        )

        gamma = config_channels.config.getboolean('batch_norm', 'gamma')
        beta = config_channels.config.getboolean('batch_norm', 'beta')
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
                m.weight.requires_grad = gamma
                m.bias.requires_grad = beta

        if config_channels.config.getboolean('model', 'pretrained'):
            settings = pretrained_settings['inceptionv4'][config_channels.config.get('inception4', 'pretrained')]
            logging.info('use pretrained model: ' + str(settings))
            state_dict = self.state_dict()
            for key, value in torch.utils.model_zoo.load_url(settings['url']).items():
                if key in state_dict:
                    state_dict[key] = value
            self.load_state_dict(state_dict)
Example 17
    def __init__(self,
                 config_channels,
                 anchors,
                 num_cls,
                 transform_input=False):
        nn.Module.__init__(self)
        self.transform_input = transform_input
        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
        self.Mixed_5b = InceptionA(192, pool_features=32)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        self.Mixed_5d = InceptionA(288, pool_features=64)
        self.Mixed_6a = InceptionB(288)
        self.Mixed_6b = InceptionC(768, channels_7x7=128)
        self.Mixed_6c = InceptionC(768, channels_7x7=160)
        self.Mixed_6d = InceptionC(768, channels_7x7=160)
        self.Mixed_6e = InceptionC(768, channels_7x7=192)
        # aux_logits
        self.Mixed_7a = InceptionD(768)
        self.Mixed_7b = InceptionE(1280)
        self.Mixed_7c = InceptionE(2048)
        self.conv = nn.Conv2d(2048,
                              model.output_channels(len(anchors), num_cls), 1)

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                X = stats.truncnorm(-2, 2, scale=stddev)
                values = torch.Tensor(X.rvs(m.weight.data.numel()))
                m.weight.data.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

        if config_channels.config.getboolean('model', 'pretrained'):
            url = _model.model_urls['inception_v3_google']
            logging.info('use pretrained model: ' + url)
            state_dict = self.state_dict()
            for key, value in torch.utils.model_zoo.load_url(url).items():
                if key in state_dict:
                    state_dict[key] = value
            self.load_state_dict(state_dict)
Example 18
    def __init__(self, config_channels, anchors, num_cls, block, layers):
        nn.Module.__init__(self)
        self.conv1 = nn.Conv2d(config_channels.channels, config_channels(64, 'conv1.weight'), kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(config_channels.channels)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(config_channels, 'layer1', block, 64, layers[0])
        self.layer2 = self._make_layer(config_channels, 'layer2', block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(config_channels, 'layer3', block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(config_channels, 'layer4', block, 512, layers[3], stride=2)
        self.conv = nn.Conv2d(config_channels.channels, model.output_channels(len(anchors), num_cls), 1)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example 19
    def __init__(self, config_channels, anchors, num_cls, channels=16):
        nn.Module.__init__(self)
        layers = []

        bn = config_channels.config.getboolean('batch_norm', 'enable')
        for _ in range(5):
            layers.append(
                Conv2d(config_channels.channels,
                       config_channels(channels,
                                       'layers.%d.conv.weight' % len(layers)),
                       3,
                       bn=bn,
                       padding=True))
            layers.append(nn.MaxPool2d(kernel_size=2))
            channels *= 2
        layers.append(
            Conv2d(config_channels.channels,
                   config_channels(channels,
                                   'layers.%d.conv.weight' % len(layers)),
                   3,
                   bn=bn,
                   padding=True))
        layers.append(
            nn.ConstantPad2d((0, 1, 0, 1), float(np.finfo(np.float32).min)))
        layers.append(nn.MaxPool2d(kernel_size=2, stride=1))
        channels *= 2
        for _ in range(2):
            layers.append(
                Conv2d(config_channels.channels,
                       config_channels(channels,
                                       'layers.%d.conv.weight' % len(layers)),
                       3,
                       bn=bn,
                       padding=True))
        layers.append(
            Conv2d(config_channels.channels,
                   model.output_channels(len(anchors), num_cls),
                   1,
                   bn=False,
                   act=False))
        self.layers = nn.Sequential(*layers)

        self.init()
Example 20
    def __init__(self, config_channels, anchors, num_cls, block, layers):
        self.inplanes = 64
        nn.Module.__init__(self)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.conv = nn.Conv2d(512, model.output_channels(len(anchors), num_cls), 1)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example 21
    def __init__(self, config_channels, anchors, num_cls, transform_input=False):
        nn.Module.__init__(self)
        self.transform_input = transform_input
        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
        self.Mixed_5b = InceptionA(192, pool_features=32)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        self.Mixed_5d = InceptionA(288, pool_features=64)
        self.Mixed_6a = InceptionB(288)
        self.Mixed_6b = InceptionC(768, channels_7x7=128)
        self.Mixed_6c = InceptionC(768, channels_7x7=160)
        self.Mixed_6d = InceptionC(768, channels_7x7=160)
        self.Mixed_6e = InceptionC(768, channels_7x7=192)
        # aux_logits
        self.Mixed_7a = InceptionD(768)
        self.Mixed_7b = InceptionE(1280)
        self.Mixed_7c = InceptionE(2048)
        self.conv = nn.Conv2d(2048, model.output_channels(len(anchors), num_cls), 1)

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                X = stats.truncnorm(-2, 2, scale=stddev)
                values = torch.Tensor(X.rvs(m.weight.data.numel()))
                m.weight.data.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

        if config_channels.config.getboolean('model', 'pretrained'):
            url = _model.model_urls['inception_v3_google']
            logging.info('use pretrained model: ' + url)
            state_dict = self.state_dict()
            for key, value in torch.utils.model_zoo.load_url(url).items():
                if key in state_dict:
                    state_dict[key] = value
            self.load_state_dict(state_dict)
Example 22
    def __init__(self, config, anchors, num_cls):
        nn.Module.__init__(self)
        channels_in = 3
        channels_out = 16
        layers = []

        for _ in range(5):
            layers.append(
                Conv2d_BatchNorm(channels_in,
                                 channels_out,
                                 3,
                                 same_padding=True))
            channels_in = layers[-1].conv.weight.size(0)
            layers.append(nn.MaxPool2d(kernel_size=2))
            channels_out *= 2
        layers.append(
            Conv2d_BatchNorm(channels_in, channels_out, 3, same_padding=True))
        channels_in = layers[-1].conv.weight.size(0)
        layers.append(
            nn.ConstantPad2d((0, 1, 0, 1), float(np.finfo(np.float32).min)))
        layers.append(nn.MaxPool2d(kernel_size=2, stride=1))
        channels_out *= 2
        for _ in range(2):
            layers.append(
                Conv2d_BatchNorm(channels_in,
                                 channels_out,
                                 3,
                                 same_padding=True))
            channels_in = layers[-1].conv.weight.size(0)
        layers.append(
            Conv2d(channels_in,
                   model.output_channels(len(anchors), num_cls),
                   1,
                   act=False))
        self.layers = nn.Sequential(*layers)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight)
Example 23
 def __init__(self, config_channels, anchors, num_cls, features):
     nn.Module.__init__(self)
     self.features = features
     self.conv = nn.Conv2d(config_channels.channels,
                           model.output_channels(len(anchors), num_cls), 1)
     self._initialize_weights()
Example 24
    def __init__(self, config, anchors, num_cls, stride=2):
        nn.Module.__init__(self)
        self.stride = stride
        channels_in = 3
        channels_out = 32
        layers = []

        # layers1
        for _ in range(2):
            layers.append(
                Conv2d_BatchNorm(channels_in,
                                 channels_out,
                                 3,
                                 same_padding=True))
            channels_in = layers[-1].conv.weight.size(0)
            layers.append(nn.MaxPool2d(kernel_size=2))
            channels_out *= 2
        # down 4
        for _ in range(2):
            layers.append(
                Conv2d_BatchNorm(channels_in,
                                 channels_out,
                                 3,
                                 same_padding=True))
            channels_in = layers[-1].conv.weight.size(0)
            layers.append(Conv2d_BatchNorm(channels_in, channels_out // 2, 1))
            channels_in = layers[-1].conv.weight.size(0)
            layers.append(
                Conv2d_BatchNorm(channels_in,
                                 channels_out,
                                 3,
                                 same_padding=True))
            channels_in = layers[-1].conv.weight.size(0)
            layers.append(nn.MaxPool2d(kernel_size=2))
            channels_out *= 2
        # down 16
        for _ in range(2):
            layers.append(
                Conv2d_BatchNorm(channels_in,
                                 channels_out,
                                 3,
                                 same_padding=True))
            channels_in = layers[-1].conv.weight.size(0)
            layers.append(Conv2d_BatchNorm(channels_in, channels_out // 2, 1))
            channels_in = layers[-1].conv.weight.size(0)
        layers.append(
            Conv2d_BatchNorm(channels_in, channels_out, 3, same_padding=True))
        channels_in = layers[-1].conv.weight.size(0)
        self.layers1 = nn.Sequential(*layers)

        # layers2
        layers = []
        layers.append(nn.MaxPool2d(kernel_size=2))
        channels_out *= 2
        # down 32
        for _ in range(2):
            layers.append(
                Conv2d_BatchNorm(channels_in,
                                 channels_out,
                                 3,
                                 same_padding=True))
            channels_in = layers[-1].conv.weight.size(0)
            layers.append(Conv2d_BatchNorm(channels_in, channels_out // 2, 1))
            channels_in = layers[-1].conv.weight.size(0)
        for _ in range(3):
            layers.append(
                Conv2d_BatchNorm(channels_in,
                                 channels_out,
                                 3,
                                 same_padding=True))
            channels_in = layers[-1].conv.weight.size(0)
        self.layers2 = nn.Sequential(*layers)

        self.passthrough = Conv2d_BatchNorm(
            self.layers1[-1].conv.weight.size(0), 64, 1)

        # layers3
        layers = []
        channels_in += self.passthrough.conv.weight.size(
            0) * self.stride * self.stride  # reorg
        layers.append(Conv2d_BatchNorm(channels_in, 1024, 3,
                                       same_padding=True))
        channels_in = layers[-1].conv.weight.size(0)
        layers.append(
            Conv2d(channels_in,
                   model.output_channels(len(anchors), num_cls),
                   1,
                   act=False))
        self.layers3 = nn.Sequential(*layers)

        # init
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight)
Example 25
    def __init__(self, config_channels, anchors, num_cls):
        nn.Module.__init__(self)
        features = []
        features.append(
            BasicConv2d(config_channels.channels,
                        config_channels(
                            32, 'features.%d.conv.weight' % len(features)),
                        kernel_size=3,
                        stride=2))
        features.append(
            BasicConv2d(config_channels.channels,
                        config_channels(
                            32, 'features.%d.conv.weight' % len(features)),
                        kernel_size=3,
                        stride=1))
        features.append(
            BasicConv2d(config_channels.channels,
                        config_channels(
                            64, 'features.%d.conv.weight' % len(features)),
                        kernel_size=3,
                        stride=1,
                        padding=1))
        features.append(
            Mixed_3a(config_channels, 'features.%d' % len(features)))
        features.append(
            Mixed_4a(config_channels, 'features.%d' % len(features)))
        features.append(
            Mixed_5a(config_channels, 'features.%d' % len(features)))
        features.append(
            Inception_A(config_channels, 'features.%d' % len(features)))
        features.append(
            Inception_A(config_channels, 'features.%d' % len(features)))
        features.append(
            Inception_A(config_channels, 'features.%d' % len(features)))
        features.append(
            Inception_A(config_channels, 'features.%d' % len(features)))
        features.append(
            Reduction_A(config_channels,
                        'features.%d' % len(features)))  # Mixed_6a
        features.append(
            Inception_B(config_channels, 'features.%d' % len(features)))
        features.append(
            Inception_B(config_channels, 'features.%d' % len(features)))
        features.append(
            Inception_B(config_channels, 'features.%d' % len(features)))
        features.append(
            Inception_B(config_channels, 'features.%d' % len(features)))
        features.append(
            Inception_B(config_channels, 'features.%d' % len(features)))
        features.append(
            Inception_B(config_channels, 'features.%d' % len(features)))
        features.append(
            Inception_B(config_channels, 'features.%d' % len(features)))
        features.append(
            Reduction_B(config_channels,
                        'features.%d' % len(features)))  # Mixed_7a
        features.append(
            Inception_C(config_channels, 'features.%d' % len(features)))
        features.append(
            Inception_C(config_channels, 'features.%d' % len(features)))
        features.append(
            Inception_C(config_channels, 'features.%d' % len(features)))
        features.append(
            nn.Conv2d(config_channels.channels,
                      model.output_channels(len(anchors), num_cls), 1))
        self.features = nn.Sequential(*features)

        try:
            gamma = config_channels.config.getboolean('batch_norm', 'gamma')
        except (configparser.NoSectionError, configparser.NoOptionError):
            gamma = True
        try:
            beta = config_channels.config.getboolean('batch_norm', 'beta')
        except (configparser.NoSectionError, configparser.NoOptionError):
            beta = True
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
                m.weight.requires_grad = gamma
                m.bias.requires_grad = beta
        try:
            if config_channels.config.getboolean('model', 'pretrained'):
                settings = pretrained_settings['inceptionv4'][
                    config_channels.config.get('inception4', 'pretrained')]
                logging.info('use pretrained model: ' + str(settings))
                state_dict = self.state_dict()
                for key, value in torch.utils.model_zoo.load_url(
                        settings['url']).items():
                    if key in state_dict:
                        state_dict[key] = value
                self.load_state_dict(state_dict)
        except (configparser.NoSectionError, configparser.NoOptionError):
            pass