Example 1
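BiSeNet constructor: it wires up a ResNet-18 context path (backbone), a high-resolution spatial path, a global-average-pooling context branch, two attention refinement modules (ARMs) with per-stage refinement convolutions, training-time auxiliary heads, and a feature fusion module (FFM).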
    def __init__(self, out_planes, is_training=False,
                 pretrained_model=None,
                 norm_layer=nn.BatchNorm2d):
        super(BiSeNet, self).__init__()
        self.backbone = resnet18(pretrained_model, norm_layer=norm_layer,
                                 bn_eps=1e-5,
                                 bn_momentum=0.1,
                                 deep_stem=True, stem_width=64)

        self.business_layer = []
        self.is_training = is_training

        self.spatial_path = SpatialPath(3, 128, norm_layer)

        conv_channel = 128
        self.global_context = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            ConvBnRelu(512, conv_channel, 1, 1, 0,
                       has_bn=True,
                       has_relu=True, has_bias=False, norm_layer=norm_layer)
        )

        # backbone stage channels, deepest first: [512, 256, 128, 64]
        arms = [AttentionRefinement(512, conv_channel, norm_layer),
                AttentionRefinement(256, conv_channel, norm_layer)]
        refines = [ConvBnRelu(conv_channel, conv_channel, 3, 1, 1,
                              has_bn=True, norm_layer=norm_layer,
                              has_relu=True, has_bias=False),
                   ConvBnRelu(conv_channel, conv_channel, 3, 1, 1,
                              has_bn=True, norm_layer=norm_layer,
                              has_relu=True, has_bias=False)]

        if is_training:
            heads = [BiSeNetHead(conv_channel, out_planes, 2,
                                 True, norm_layer),
                     BiSeNetHead(conv_channel, out_planes, 1,
                                 True, norm_layer),
                     BiSeNetHead(conv_channel * 2, out_planes, 1,
                                 False, norm_layer)]
        else:
            heads = [None, None,
                     BiSeNetHead(conv_channel * 2, out_planes, 1,
                                 False, norm_layer)]

        self.ffm = FeatureFusion(conv_channel * 2, conv_channel * 2,
                                 1, norm_layer)

        self.arms = nn.ModuleList(arms)
        self.refines = nn.ModuleList(refines)
        self.heads = nn.ModuleList(heads)

        self.business_layer.append(self.spatial_path)
        self.business_layer.append(self.global_context)
        self.business_layer.append(self.arms)
        self.business_layer.append(self.refines)
        self.business_layer.append(self.heads)
        self.business_layer.append(self.ffm)
Example 2
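ICNet constructor: a three-convolution high-resolution branch (conv_sub1, overall stride 8), a PSPNet-style ResNet-50 backbone (PSPHead_res50), a fusion head (_ICHead), and 1x1 reduction convolutions for the medium- and low-resolution branches.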
    def __init__(self, nclass):
        super(ICNet, self).__init__()
        self.conv_sub1 = nn.Sequential(ConvBnRelu(3, 32, 3, 2, 1),
                                       ConvBnRelu(32, 32, 3, 2, 1),
                                       ConvBnRelu(32, 64, 3, 2, 1))
        self.backbone = PSPHead_res50()
        self.head = _ICHead(nclass)

        self.conv_sub4 = ConvBnRelu(512, 256, 1)
        self.conv_sub2 = ConvBnRelu(512, 256, 1)
Example 3
    def __init__(self, in_planes, out_planes, norm_layer=nn.BatchNorm2d):
        super(SpatialPath, self).__init__()
        inner_channel = 64
        self.conv_7x7 = ConvBnRelu(in_planes, inner_channel, 7, 2, 3,
                                   has_bn=True, norm_layer=norm_layer,
                                   has_relu=True, has_bias=False)
        self.conv_3x3_1 = ConvBnRelu(inner_channel, inner_channel, 3, 2, 1,
                                     has_bn=True, norm_layer=norm_layer,
                                     has_relu=True, has_bias=False)
        self.conv_3x3_2 = ConvBnRelu(inner_channel, inner_channel, 3, 2, 1,
                                     has_bn=True, norm_layer=norm_layer,
                                     has_relu=True, has_bias=False)
        self.conv_1x1 = ConvBnRelu(inner_channel, out_planes, 1, 1, 0,
                                   has_bn=True, norm_layer=norm_layer,
                                   has_relu=True, has_bias=False)
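
The matching forward pass is not part of the snippet; a minimal sketch, assuming the four blocks are applied in sequence:

    def forward(self, x):
        # three stride-2 convolutions reduce resolution by 8x overall,
        # then a 1x1 projection sets the output channel count
        x = self.conv_7x7(x)
        x = self.conv_3x3_1(x)
        x = self.conv_3x3_2(x)
        x = self.conv_1x1(x)
        return x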
Example 4
    def __init__(self, block, layers, channels, norm_layer=nn.BatchNorm2d):
        super(Xception, self).__init__()

        self.in_channels = 8
        self.conv1 = ConvBnRelu(3,
                                self.in_channels,
                                3,
                                2,
                                1,
                                has_bn=True,
                                norm_layer=norm_layer,
                                has_relu=True,
                                has_bias=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block,
                                       norm_layer,
                                       layers[0],
                                       channels[0],
                                       stride=2)
        self.layer2 = self._make_layer(block,
                                       norm_layer,
                                       layers[1],
                                       channels[1],
                                       stride=2)
        self.layer3 = self._make_layer(block,
                                       norm_layer,
                                       layers[2],
                                       channels[2],
                                       stride=2)
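
The snippet stops at the encoder stages; a minimal sketch of the corresponding forward pass, assuming the stem feeds the three stages in order:

    def forward(self, x):
        x = self.conv1(x)    # stride-2 stem: 3 -> 8 channels
        x = self.maxpool(x)  # additional stride-2 downsampling
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x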
Example 5
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=1,
                 stride=1,
                 padding=0,
                 dilation=1,
                 has_relu=True,
                 norm_layer=nn.BatchNorm2d):
        super(SeparableConvBnRelu, self).__init__()

        self.conv1 = nn.Conv2d(in_channels,
                               in_channels,
                               kernel_size,
                               stride,
                               padding,
                               dilation,
                               groups=in_channels,
                               bias=False)
        self.point_wise_cbr = ConvBnRelu(in_channels,
                                         out_channels,
                                         1,
                                         1,
                                         0,
                                         has_bn=True,
                                         norm_layer=norm_layer,
                                         has_relu=has_relu,
                                         has_bias=False)
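
This factorizes a standard convolution into a depthwise (grouped, per-channel) convolution followed by a 1x1 pointwise projection. A minimal forward sketch, assuming conv1 feeds point_wise_cbr directly:

    def forward(self, x):
        x = self.conv1(x)           # depthwise: groups == in_channels
        x = self.point_wise_cbr(x)  # pointwise 1x1 + BN + optional ReLU
        return x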
Example 6
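DFANet constructor: an XceptionA backbone plus repeated encoder stages and fully-connected attention (FCAttention) modules for a second and third aggregation branch, 1x1 reductions that bring every fused feature to 32 channels, a 1x1 classifier (conv_out), and two deep-supervision heads (dsn1, dsn2); the 'exclusive' attribute lists the modules built on top of the backbone.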
    def __init__(self, nclass, **kwargs):
        super(DFANet, self).__init__()
        self.backbone = XceptionA()

        self.enc2_2 = Enc(240, 48, 4, **kwargs)
        self.enc3_2 = Enc(144, 96, 6, **kwargs)
        self.enc4_2 = Enc(288, 192, 4, **kwargs)
        self.fca_2 = FCAttention(192, **kwargs)

        self.enc2_3 = Enc(240, 48, 4, **kwargs)
        self.enc3_3 = Enc(144, 96, 6, **kwargs)
        self.enc3_4 = Enc(288, 192, 4, **kwargs)
        self.fca_3 = FCAttention(192, **kwargs)

        self.enc2_1_reduce = ConvBnRelu(48, 32, 1, **kwargs)
        self.enc2_2_reduce = ConvBnRelu(48, 32, 1, **kwargs)
        self.enc2_3_reduce = ConvBnRelu(48, 32, 1, **kwargs)
        self.conv_fusion = ConvBnRelu(32, 32, 1, **kwargs)

        self.fca_1_reduce = ConvBnRelu(192, 32, 1, **kwargs)
        self.fca_2_reduce = ConvBnRelu(192, 32, 1, **kwargs)
        self.fca_3_reduce = ConvBnRelu(192, 32, 1, **kwargs)
        self.conv_out = nn.Conv2d(32, nclass, 1)

        self.dsn1 = dsn(192, nclass)
        self.dsn2 = dsn(192, nclass)

        self.__setattr__('exclusive', [
            'enc2_2', 'enc3_2', 'enc4_2', 'fca_2', 'enc2_3', 'enc3_3',
            'enc3_4', 'fca_3', 'enc2_1_reduce', 'enc2_2_reduce',
            'enc2_3_reduce', 'conv_fusion', 'fca_1_reduce', 'fca_2_reduce',
            'fca_3_reduce', 'conv_out'
        ])
Example 7
    def __init__(self, in_planes, out_planes, scale,
                 is_aux=False, norm_layer=nn.BatchNorm2d):
        super(BiSeNetHead, self).__init__()
        # auxiliary heads use a wider 3x3 bottleneck (128 channels)
        # than the main head (64 channels)
        mid_planes = 128 if is_aux else 64
        self.conv_3x3 = ConvBnRelu(in_planes, mid_planes, 3, 1, 1,
                                   has_bn=True, norm_layer=norm_layer,
                                   has_relu=True, has_bias=False)
        # self.dropout = nn.Dropout(0.1)
        self.conv_1x1 = nn.Conv2d(mid_planes, out_planes, kernel_size=1,
                                  stride=1, padding=0)
        self.scale = scale
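
A minimal sketch of the matching forward pass; using scale as a bilinear upsampling factor (with F as torch.nn.functional) is an assumption, not part of the original snippet:

    def forward(self, x):
        x = self.conv_3x3(x)
        x = self.conv_1x1(x)
        if self.scale > 1:
            # assumed: upsample the logits back toward input resolution
            x = F.interpolate(x, scale_factor=self.scale,
                              mode='bilinear', align_corners=True)
        return x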