Example #1
0
    def __init__(self, block, layers, channels, classes=-1, thumbnail=False,
                 embedding_size=512, weight_norm=False, feature_norm=False,
                 need_cls_layer=True, **kwargs):
        """Assemble the SE-ResNetV2 feature pipeline.

        `layers` gives the block count per stage and `channels` the stem
        width followed by one output width per stage, so it must hold one
        more entry than `layers`. The remaining flags are forwarded to the
        base class, which presumably builds the classification head —
        confirm against the parent definition.
        """
        super(SE_ResNetV2, self).__init__(classes, embedding_size, weight_norm,
                                          feature_norm, need_cls_layer, **kwargs)
        # One channel entry per stage, plus the leading stem width.
        assert len(layers) == len(channels) - 1
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            feats = self.features
            # Input normalization with no learnable affine parameters.
            feats.add(nn.BatchNorm(scale=False, center=False))
            if thumbnail:
                # Small-input stem: a single 3x3 conv, no pooling.
                feats.add(_conv3x3(channels[0], 1, 0))
            else:
                # Full stem: conv + BN + PReLU followed by 3x3/s2 max-pool.
                feats.add(nn.Conv2D(channels[0], 3, 1, 1, use_bias=False))
                feats.add(nn.BatchNorm())
                feats.add(nn.PReLU())
                feats.add(nn.MaxPool2D(3, 2, 1))

            prev_width = channels[0]
            for stage_idx, (depth, width) in enumerate(zip(layers, channels[1:])):
                # The first stage keeps resolution; every later stage halves it.
                feats.add(self._make_layer(block, depth, width,
                                           1 if stage_idx == 0 else 2,
                                           stage_idx + 1, in_channels=prev_width))
                prev_width = width
            # Final pre-activation pair, then collapse spatial dims.
            feats.add(nn.BatchNorm())
            feats.add(nn.PReLU())
            feats.add(nn.GlobalAvgPool2D())
            feats.add(nn.Flatten())

            # Project pooled features to the embedding dimension; the
            # affine-free BatchNorm leaves the embedding unscaled.
            feats.add(nn.Dense(embedding_size, use_bias=False))
            feats.add(nn.BatchNorm(scale=False, center=False))
            feats.add(nn.PReLU())
Example #2
0
    def __init__(self,
                 channels,
                 stride,
                 downsample=False,
                 in_channels=0,
                 **kwargs):
        """Pre-activation SE bottleneck: 1x1 reduce, 3x3, 1x1 expand.

        `channels` is the block's output width (the 3x3 conv runs at a
        quarter of it); `stride` is applied by the 3x3 conv and, when
        `downsample` is set, by a 1x1 projection for the shortcut path.
        """
        super(SE_BottleneckV2, self).__init__(**kwargs)
        squeezed = channels // 4
        # Pre-activation ordering: BN + PReLU precede each convolution
        # (the actual application order lives in the forward pass).
        self.bn1 = nn.BatchNorm()
        self.prelu1 = nn.PReLU()
        # 1x1 conv narrowing to the bottleneck width.
        self.conv1 = nn.Conv2D(squeezed, kernel_size=1, strides=1,
                               use_bias=False)
        self.bn2 = nn.BatchNorm()
        self.prelu2 = nn.PReLU()
        # 3x3 conv carries the block's stride.
        self.conv2 = _conv3x3(squeezed, stride, squeezed)
        self.bn3 = nn.BatchNorm()
        self.prelu3 = nn.PReLU()
        # 1x1 conv expanding back to the full output width.
        self.conv3 = nn.Conv2D(channels, kernel_size=1, strides=1,
                               use_bias=False)
        # Shortcut projection only when shape/width change; otherwise the
        # identity path is used (downsample stays None).
        if downsample:
            self.downsample = nn.Conv2D(channels, 1, stride,
                                        use_bias=False,
                                        in_channels=in_channels)
        else:
            self.downsample = None

        # Squeeze-and-excitation recalibration over the output channels.
        self.se = SELayer(channels, channels)