Пример #1
0
 def __init__(self,
              in_planes,
              out_planes,
              scale,
              is_aux=False,
              norm_layer=nn.BatchNorm2d,
              real_time=0):
     """Segmentation head: 3x3 conv -> 1x1 classifier.

     Args:
         in_planes: input channel count.
         out_planes: number of output classes.
         scale: upsampling factor stored for use in forward.
         is_aux: True for an auxiliary (deep-supervision) head.
         norm_layer: normalization layer class for ConvBnRelu.
         real_time: non-zero selects the lighter (128-ch) aux width.
     """
     super(BiSeNetHead, self).__init__()
     self.real_time = real_time
     # Auxiliary heads use a wide hidden layer (128 in the real-time
     # variant, 256 otherwise); the main head always uses 64 channels.
     if self.real_time:
         feature_num = 128
     else:
         feature_num = 256
     # The original duplicated the whole conv construction per branch;
     # both layers only differ in channel width, so select it once.
     mid_planes = feature_num if is_aux else 64
     self.conv_3x3 = ConvBnRelu(in_planes,
                                mid_planes,
                                3,
                                1,
                                1,
                                has_bn=True,
                                norm_layer=norm_layer,
                                has_relu=True,
                                has_bias=False)
     # self.dropout = nn.Dropout(0.1)
     self.conv_1x1 = nn.Conv2d(mid_planes,
                               out_planes,
                               kernel_size=1,
                               stride=1,
                               padding=0)
     self.scale = scale
Пример #2
0
    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 padding=0, dilation=1,
                 has_relu=True, norm_layer=nn.BatchNorm2d):
        """Depthwise-separable convolution: a per-channel spatial conv
        followed by a 1x1 pointwise ConvBnRelu (+ optional ReLU)."""
        super(SeparableConvBnRelu, self).__init__()

        # Depthwise stage: groups == in_channels gives one filter per
        # input channel, no cross-channel mixing and no bias.
        self.conv1 = nn.Conv2d(
            in_channels, in_channels, kernel_size, stride,
            padding, dilation, groups=in_channels, bias=False)
        # Pointwise stage mixes channels and applies BN / activation.
        self.point_wise_cbr = ConvBnRelu(
            in_channels, out_channels, 1, 1, 0,
            has_bn=True, norm_layer=norm_layer,
            has_relu=has_relu, has_bias=False)
Пример #3
0
 def __init__(self,
              in_planes,
              out_planes,
              scale,
              is_aux=False,
              norm_layer=nn.BatchNorm2d):
     """Segmentation head: 3x3 conv (256 ch) -> 1x1 classifier.

     Args:
         in_planes: input channel count.
         out_planes: number of output classes.
         scale: upsampling factor stored for use in forward.
         is_aux: kept for interface compatibility; see note below.
         norm_layer: normalization layer class for ConvBnRelu.
     """
     super(BiSeNetHead, self).__init__()
     # NOTE: the original branched on is_aux, but both branches built
     # byte-identical layers (256 hidden channels either way), so the
     # dead branching is removed. is_aux stays in the signature for
     # callers that pass it.
     self.conv_3x3 = ConvBnRelu(in_planes,
                                256,
                                3,
                                1,
                                1,
                                has_bn=True,
                                norm_layer=norm_layer,
                                has_relu=True,
                                has_bias=False)
     # self.dropout = nn.Dropout(0.1)
     self.conv_1x1 = nn.Conv2d(256,
                               out_planes,
                               kernel_size=1,
                               stride=1,
                               padding=0)
     self.scale = scale
Пример #4
0
 def __init__(self, in_planes, out_planes, norm_layer=nn.BatchNorm2d):
     """Spatial path: three stride-2 convs (overall stride 8) followed
     by a 1x1 projection to out_planes channels."""
     super(SpatialPath, self).__init__()
     mid_ch = 64
     # 7x7 stride-2 stem.
     self.conv_7x7 = ConvBnRelu(in_planes, mid_ch, 7, 2, 3,
                                has_bn=True, norm_layer=norm_layer,
                                has_relu=True, has_bias=False)
     # Two 3x3 stride-2 stages at constant channel width.
     self.conv_3x3_1 = ConvBnRelu(mid_ch, mid_ch, 3, 2, 1,
                                  has_bn=True, norm_layer=norm_layer,
                                  has_relu=True, has_bias=False)
     self.conv_3x3_2 = ConvBnRelu(mid_ch, mid_ch, 3, 2, 1,
                                  has_bn=True, norm_layer=norm_layer,
                                  has_relu=True, has_bias=False)
     # Final 1x1 projection to the requested output width.
     self.conv_1x1 = ConvBnRelu(mid_ch, out_planes, 1, 1, 0,
                                has_bn=True, norm_layer=norm_layer,
                                has_relu=True, has_bias=False)
Пример #5
0
    def __init__(self, block, layers, channels, norm_layer=nn.BatchNorm2d):
        """Xception-style backbone: stride-2 stem + max-pool, then three
        stride-2 stages built from `block`."""
        super(Xception, self).__init__()

        self.in_channels = 8
        self.conv1 = ConvBnRelu(3, self.in_channels, 3, 2, 1,
                                has_bn=True, norm_layer=norm_layer,
                                has_relu=True, has_bias=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Build layer1..layer3 in order; _make_layer is presumed to update
        # self.in_channels as it goes, so construction order matters.
        for idx in range(3):
            stage = self._make_layer(block, norm_layer,
                                     layers[idx], channels[idx], stride=2)
            setattr(self, 'layer%d' % (idx + 1), stage)
Пример #6
0
    def __init__(self,
                 out_planes,
                 is_training,
                 criterion,
                 ohem_criterion,
                 pretrained_model=None,
                 norm_layer=nn.BatchNorm2d,
                 in_planes=3,
                 real_time=0):
        """BiSeNet with a ResNet-18 context path.

        Args:
            out_planes: number of output classes.
            is_training: when true, auxiliary heads and losses are kept.
            criterion: main loss (stored only when training).
            ohem_criterion: OHEM loss (stored only when training).
            pretrained_model: weights passed through to resnet18.
            norm_layer: normalization layer class used throughout.
            in_planes: input image channel count.
            real_time: non-zero selects the lighter real-time heads.
        """
        super(BiSeNet, self).__init__()
        self.real_time = real_time
        self.context_path = resnet18(pretrained_model,
                                     norm_layer=norm_layer,
                                     deep_stem=False,
                                     stem_width=64,
                                     in_planes=in_planes)

        self.business_layer = []
        self.is_training = is_training

        self.spatial_path = SpatialPath(in_planes, 128, norm_layer)

        conv_channel = 128
        # Global context: pooled backbone feature projected to conv_channel.
        self.global_context = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            ConvBnRelu(512, conv_channel, 1, 1, 0,
                       has_bn=True, has_relu=True, has_bias=False,
                       norm_layer=norm_layer))

        # stage = [512, 256, 128, 64]
        # One attention-refinement module per used backbone stage.
        arms = [AttentionRefinement(stage_ch, conv_channel, norm_layer)
                for stage_ch in (512, 256)]
        refines = [ConvBnRelu(conv_channel, conv_channel, 3, 1, 1,
                              has_bn=True, norm_layer=norm_layer,
                              has_relu=True, has_bias=False)
                   for _ in range(2)]

        # Auxiliary-head scales differ between the real-time and full
        # configurations; a real-time model at inference skips them.
        if self.real_time:
            aux_scales = (2, 1) if is_training else None
        else:
            aux_scales = (16, 8)
        if aux_scales is None:
            heads = [None, None]
        else:
            heads = [BiSeNetHead(conv_channel, out_planes, s, True,
                                 norm_layer, real_time=real_time)
                     for s in aux_scales]
        # Main head operates on the fused (2x conv_channel) feature.
        heads.append(BiSeNetHead(conv_channel * 2, out_planes, 1, False,
                                 norm_layer, real_time=real_time))

        self.ffm = FeatureFusion(conv_channel * 2, conv_channel * 2, 1,
                                 norm_layer)

        self.arms = nn.ModuleList(arms)
        self.refines = nn.ModuleList(refines)
        self.heads = nn.ModuleList(heads)

        # Everything except the pretrained context path is "business"
        # (trained-from-scratch) and typically gets a larger LR.
        self.business_layer += [self.spatial_path, self.global_context,
                                self.arms, self.refines, self.heads,
                                self.ffm]

        if is_training:
            self.criterion = criterion
            self.ohem_criterion = ohem_criterion
Пример #7
0
    def __init__(self,
                 out_planes,
                 is_training,
                 criterion,
                 pretrained_model=None,
                 norm_layer=nn.BatchNorm2d):
        """BiSeNet with a ResNet-101 context path.

        Args:
            out_planes: number of output classes.
            is_training: when true, the loss criterion is stored.
            criterion: training loss.
            pretrained_model: weights passed through to resnet101.
            norm_layer: normalization layer class used throughout.
        """
        super(BiSeNet, self).__init__()
        self.context_path = resnet101(pretrained_model,
                                      norm_layer=norm_layer,
                                      bn_eps=config.bn_eps,
                                      bn_momentum=config.bn_momentum,
                                      deep_stem=True,
                                      stem_width=64)

        self.business_layer = []
        self.is_training = is_training

        self.spatial_path = SpatialPath(3, 128, norm_layer)

        conv_channel = 128
        # Global context: pooled backbone feature projected to conv_channel.
        self.global_context = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            ConvBnRelu(2048, conv_channel, 1, 1, 0,
                       has_bn=True, has_relu=True, has_bias=False,
                       norm_layer=norm_layer))

        # stage = [512, 256, 128, 64]
        # One attention-refinement module per used backbone stage.
        arms = [AttentionRefinement(stage_ch, conv_channel, norm_layer)
                for stage_ch in (2048, 1024)]
        refines = [ConvBnRelu(conv_channel, conv_channel, 3, 1, 1,
                              has_bn=True, norm_layer=norm_layer,
                              has_relu=True, has_bias=False)
                   for _ in range(2)]

        # Two auxiliary heads (scales 16 and 8) plus the main head on the
        # fused (2x conv_channel) feature.
        heads = [BiSeNetHead(conv_channel, out_planes, 16, True, norm_layer),
                 BiSeNetHead(conv_channel, out_planes, 8, True, norm_layer),
                 BiSeNetHead(conv_channel * 2, out_planes, 8, False,
                             norm_layer)]

        self.ffm = FeatureFusion(conv_channel * 2, conv_channel * 2, 1,
                                 norm_layer)

        self.arms = nn.ModuleList(arms)
        self.refines = nn.ModuleList(refines)
        self.heads = nn.ModuleList(heads)

        # Everything except the pretrained context path is "business"
        # (trained-from-scratch) and typically gets a larger LR.
        self.business_layer += [self.spatial_path, self.global_context,
                                self.arms, self.refines, self.heads,
                                self.ffm]

        if is_training:
            self.criterion = criterion