Example #1
    def __init__(self, in_channels, inner_channel, norm_layer=nn.BatchNorm2d):
        super(ObjectContext, self).__init__()
        self.in_channels = in_channels
        self.inner_channel = inner_channel

        self.reduce_conv = ConvBnRelu(self.in_channels, self.inner_channel,
                                      3, 1, 1,
                                      has_bn=True, has_relu=True,
                                      has_bias=False, norm_layer=norm_layer)

        self.intra_similarity_branch = nn.Sequential(
            SymmetricConv(self.inner_channel, 11, norm_layer),
            nn.Conv2d(self.inner_channel, config.prior_size ** 2, 1, 1, 0,
                      groups=16, bias=False),
            norm_layer(config.prior_size ** 2)
        )

        self.intra_post_conv = ConvBnRelu(self.inner_channel,
                                          self.inner_channel,
                                          1, 1, 0, has_bn=True, has_relu=True,
                                          has_bias=False, norm_layer=norm_layer)
        self.inter_post_conv = ConvBnRelu(self.inner_channel,
                                          self.inner_channel,
                                          1, 1, 0, has_bn=True, has_relu=True,
                                          has_bias=False, norm_layer=norm_layer)
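
Every example in this section calls a ConvBnRelu helper that is not reproduced here. The following is a minimal sketch reconstructed only from the call sites in these examples, so the positional order (in_planes, out_planes, ksize, stride, pad, dilation) and the has_bn / has_relu / has_bias / bn_eps / inplace flags are assumptions rather than the original definition:

import torch.nn as nn

class ConvBnRelu(nn.Module):
    def __init__(self, in_planes, out_planes, ksize, stride, pad, dilation=1,
                 groups=1, has_bn=True, norm_layer=nn.BatchNorm2d, bn_eps=1e-5,
                 has_relu=True, inplace=True, has_bias=False):
        super(ConvBnRelu, self).__init__()
        # plain conv -> (optional) normalization -> (optional) ReLU
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=ksize,
                              stride=stride, padding=pad, dilation=dilation,
                              groups=groups, bias=has_bias)
        self.has_bn = has_bn
        self.has_relu = has_relu
        if self.has_bn:
            self.bn = norm_layer(out_planes, eps=bn_eps)
        if self.has_relu:
            self.relu = nn.ReLU(inplace=inplace)

    def forward(self, x):
        x = self.conv(x)
        if self.has_bn:
            x = self.bn(x)
        if self.has_relu:
            x = self.relu(x)
        return x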
Example #2
    def __init__(self, in_channels, out_planes, norm_layer=nn.BatchNorm2d):
        super(ObjectContext, self).__init__()
        self.in_channels = in_channels
        self.out_planes = out_planes

        self.local_aggregation = nn.Sequential(
            ConvBnRelu(self.in_channels,
                       self.out_planes,
                       3,
                       1,
                       1,
                       has_bn=True,
                       has_relu=True,
                       has_bias=False,
                       norm_layer=norm_layer),
            SymmetricConv(self.out_planes, 11, norm_layer))

        self.intra_post_conv = ConvBnRelu(self.out_planes,
                                          self.out_planes,
                                          1,
                                          1,
                                          0,
                                          has_bn=True,
                                          has_relu=True,
                                          has_bias=False,
                                          norm_layer=norm_layer)
        self.inter_post_conv = ConvBnRelu(self.out_planes,
                                          self.out_planes,
                                          1,
                                          1,
                                          0,
                                          has_bn=True,
                                          has_relu=True,
                                          has_bias=False,
                                          norm_layer=norm_layer)
Example #3
    def __init__(self, in_channels, inner_channel, norm_layer=nn.BatchNorm2d):
        super(ObjectContext, self).__init__()
        self.in_channels = in_channels
        self.inner_channel = inner_channel

        self.reduce_conv = ConvBnRelu(self.in_channels, self.inner_channel,
                                      3, 1, 1,
                                      has_bn=True, has_relu=True,
                                      has_bias=False, norm_layer=norm_layer)

        self.intra_similarity_branch = nn.Sequential(
            SymmetricConv(self.inner_channel, 5, norm_layer),
            ConvBnRelu(self.inner_channel, 3600, 1, 1, 0,
                       has_bn=True, has_relu=False,
                       has_bias=False, norm_layer=norm_layer),
        )

        self.intra_post_conv = ConvBnRelu(self.inner_channel,
                                          self.inner_channel,
                                          1, 1, 0, has_bn=True, has_relu=True,
                                          has_bias=False, norm_layer=norm_layer)
        self.inter_post_conv = ConvBnRelu(self.inner_channel,
                                          self.inner_channel,
                                          1, 1, 0, has_bn=True, has_relu=True,
                                          has_bias=False, norm_layer=norm_layer)
Example #4
    def __init__(self, in_channels, inner_channel, norm_layer=nn.BatchNorm2d):
        super(ObjectContext, self).__init__()
        self.in_channels = in_channels
        self.inner_channel = inner_channel

        self.reduce_conv = ConvBnRelu(self.in_channels, self.inner_channel,
                                      3, 1, 1,
                                      has_bn=True, has_relu=True,
                                      has_bias=False, norm_layer=norm_layer)

        self.intra_similarity_branch = nn.Sequential(
            AtrousSpatialPyramidPooling(self.inner_channel,
                                        norm_layer=norm_layer),
            nn.Conv2d(self.inner_channel, 3600, 1, 1, 0, groups=16,
                      bias=False),
            norm_layer(3600)
        )

        self.intra_post_conv = ConvBnRelu(self.inner_channel,
                                          self.inner_channel,
                                          1, 1, 0, has_bn=True, has_relu=True,
                                          has_bias=False, norm_layer=norm_layer)
        self.inter_post_conv = ConvBnRelu(self.inner_channel,
                                          self.inner_channel,
                                          1, 1, 0, has_bn=True, has_relu=True,
                                          has_bias=False, norm_layer=norm_layer)
Example #5
    def __init__(self, in_channels, inner_channel, norm_layer=nn.BatchNorm2d):
        super(ObjectContext, self).__init__()
        self.in_channels = in_channels
        self.inner_channel = inner_channel

        self.reduce_conv = ConvBnRelu(self.in_channels, self.inner_channel,
                                      1, 1, 0,
                                      has_bn=True, has_relu=True,
                                      has_bias=False, norm_layer=norm_layer)

        self.intra_similarity_branch = nn.Sequential(
            nn.AvgPool2d(9, 1, 4, ceil_mode=True, count_include_pad=False),
            ConvBnRelu(self.inner_channel, self.inner_channel, 1, 1, 0,
                       has_bn=True, has_relu=True,
                       has_bias=False, norm_layer=norm_layer),
            ConvBnRelu(self.inner_channel, 3600, 1, 1, 0,
                       has_bn=True, has_relu=False,
                       has_bias=False, norm_layer=norm_layer),
        )

        self.intra_post_conv = ConvBnRelu(self.inner_channel,
                                          self.inner_channel,
                                          1, 1, 0, has_bn=True, has_relu=True,
                                          has_bias=False, norm_layer=norm_layer)
        self.inter_post_conv = ConvBnRelu(self.inner_channel,
                                          self.inner_channel,
                                          1, 1, 0, has_bn=True, has_relu=True,
                                          has_bias=False, norm_layer=norm_layer)
Example #6
    def __init__(self, out_planes, criterion, pretrained_model=None,
                 norm_layer=nn.BatchNorm2d):
        super(CPNet, self).__init__()
        self.backbone = resnet50(pretrained_model, norm_layer=norm_layer,
                                 bn_eps=config.bn_eps,
                                 bn_momentum=config.bn_momentum,
                                 deep_stem=True, stem_width=64)
        self.generate_dilation(self.backbone.layer3, dilation=2)
        self.generate_dilation(self.backbone.layer4, dilation=4,
                               multi_grid=[1, 2, 4])

        self.business_layer = []

        self.context = ObjectContext(2048, 512, norm_layer)

        self.head_layer = nn.Sequential(
            ConvBnRelu(2048 + 512, 512, 3, 1, 1,
                       has_bn=True,
                       has_relu=True, has_bias=False, norm_layer=norm_layer),
            nn.Dropout2d(0.1, inplace=False),
            nn.Conv2d(512, out_planes, kernel_size=1)
        )
        self.aux_layer = nn.Sequential(
            ConvBnRelu(1024, 512, 3, 1, 1,
                       has_bn=True,
                       has_relu=True, has_bias=False, norm_layer=norm_layer),
            nn.Dropout2d(0.1, inplace=False),
            nn.Conv2d(512, out_planes, kernel_size=1)
        )
        self.business_layer.append(self.context)
        self.business_layer.append(self.head_layer)
        self.business_layer.append(self.aux_layer)

        self.criterion = criterion
        self.bce_criterion = nn.BCELoss(reduction='mean')
Example #7
    def __init__(self, out_planes, is_training,
                 criterion, ohem_criterion, pretrained_model=None,
                 norm_layer=nn.BatchNorm2d):
        super(BiSeNet, self).__init__()
        self.context_path = xception39(pretrained_model, norm_layer=norm_layer)

        self.business_layer = []
        self.is_training = is_training

        self.spatial_path = SpatialPath(3, 128, norm_layer)

        conv_channel = 128
        self.global_context = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            ConvBnRelu(256, conv_channel, 1, 1, 0,
                       has_bn=True,
                       has_relu=True, has_bias=False, norm_layer=norm_layer)
        )

        # stage = [256, 128, 64]
        arms = [AttentionRefinement(256, conv_channel, norm_layer),
                AttentionRefinement(128, conv_channel, norm_layer)]
        refines = [ConvBnRelu(conv_channel, conv_channel, 3, 1, 1,
                              has_bn=True, norm_layer=norm_layer,
                              has_relu=True, has_bias=False),
                   ConvBnRelu(conv_channel, conv_channel, 3, 1, 1,
                              has_bn=True, norm_layer=norm_layer,
                              has_relu=True, has_bias=False)]

        if is_training:
            heads = [BiSeNetHead(conv_channel, out_planes, 2,
                                 True, norm_layer),
                     BiSeNetHead(conv_channel, out_planes, 1,
                                 True, norm_layer),
                     BiSeNetHead(conv_channel * 2, out_planes, 1,
                                 False, norm_layer)]
        else:
            heads = [None, None,
                     BiSeNetHead(conv_channel * 2, out_planes, 8,
                                 False, norm_layer)]

        self.ffm = FeatureFusion(conv_channel * 2, conv_channel * 2,
                                 1, norm_layer)

        self.arms = nn.ModuleList(arms)
        self.refines = nn.ModuleList(refines)
        self.heads = nn.ModuleList(heads)

        self.business_layer.append(self.spatial_path)
        self.business_layer.append(self.global_context)
        self.business_layer.append(self.arms)
        self.business_layer.append(self.refines)
        self.business_layer.append(self.heads)
        self.business_layer.append(self.ffm)

        if is_training:
            self.criterion = criterion
            self.ohem_criterion = ohem_criterion
Example #8
    def __init__(self, out_planes, is_training,
                 criterion, pretrained_model=None,
                 norm_layer=nn.BatchNorm2d):
        super(BiSeNet, self).__init__()
        self.context_path = resnet101(pretrained_model, norm_layer=norm_layer,
                                      bn_eps=config.bn_eps,
                                      bn_momentum=config.bn_momentum,
                                      deep_stem=True, stem_width=64)

        self.business_layer = []
        self.is_training = is_training

        self.spatial_path = SpatialPath(3, 128, norm_layer)

        conv_channel = 128
        self.global_context = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            ConvBnRelu(2048, conv_channel, 1, 1, 0,
                       has_bn=True,
                       has_relu=True, has_bias=False, norm_layer=norm_layer)
        )

        # stage = [512, 256, 128, 64]
        arms = [AttentionRefinement(2048, conv_channel, norm_layer),
                AttentionRefinement(1024, conv_channel, norm_layer)]
        refines = [ConvBnRelu(conv_channel, conv_channel, 3, 1, 1,
                              has_bn=True, norm_layer=norm_layer,
                              has_relu=True, has_bias=False),
                   ConvBnRelu(conv_channel, conv_channel, 3, 1, 1,
                              has_bn=True, norm_layer=norm_layer,
                              has_relu=True, has_bias=False)]

        heads = [BiSeNetHead(conv_channel, out_planes, 16,
                             True, norm_layer),
                 BiSeNetHead(conv_channel, out_planes, 8,
                             True, norm_layer),
                 BiSeNetHead(conv_channel * 2, out_planes, 8,
                             False, norm_layer)]

        self.ffm = FeatureFusion(conv_channel * 2, conv_channel * 2,
                                 1, norm_layer)

        self.arms = nn.ModuleList(arms)
        self.refines = nn.ModuleList(refines)
        self.heads = nn.ModuleList(heads)

        self.business_layer.append(self.spatial_path)
        self.business_layer.append(self.global_context)
        self.business_layer.append(self.arms)
        self.business_layer.append(self.refines)
        self.business_layer.append(self.heads)
        self.business_layer.append(self.ffm)

        if is_training:
            self.criterion = criterion
Example #9
    def __init__(self,
                 inplane,
                 outplane,
                 criterion=None,
                 aux_criterion=None,
                 area_alpa=None,
                 pretrained_model=None,
                 norm_layer=nn.BatchNorm2d,
                 detection=False):
        super(deeperlab, self).__init__()
        self.backbone = xception.xception71(pretrained_model,
                                            inplane=inplane,
                                            norm_layer=norm_layer,
                                            bn_eps=config.bn_eps,
                                            bn_momentum=config.bn_momentum,
                                            inplace=True)
        self.business_layer = []
        self.s2d = space_to_dense(4)
        self.d2s = torch.nn.PixelShuffle(upscale_factor=4)

        self.aspp = ASPP("deeperlab", 8, norm_layer)
        self.conv1 = ConvBnRelu(128,
                                32,
                                1,
                                1,
                                0,
                                norm_layer=norm_layer,
                                bn_eps=config.bn_eps)
        self.conv2 = ConvBnRelu(768,
                                4096,
                                3,
                                1,
                                1,
                                norm_layer=norm_layer,
                                bn_eps=config.bn_eps)
        self.conv3 = ConvBnRelu(4096,
                                4096,
                                3,
                                1,
                                1,
                                norm_layer=norm_layer,
                                bn_eps=config.bn_eps)

        self.seg_conv = deeperlab_seg_head(256,
                                           outplane,
                                           4,
                                           norm_layer=norm_layer)
        self.business_layer.append(self.s2d)
        self.business_layer.append(self.d2s)
        self.business_layer.append(self.aspp)
        self.business_layer.append(self.conv1)
        self.business_layer.append(self.conv2)
        self.business_layer.append(self.conv3)
        self.business_layer.append(self.seg_conv)
        self.criterion = criterion
Example #10
    def __init__(self,
                 out_planes,
                 criterion,
                 inplace=True,
                 pretrained_model=None,
                 norm_layer=nn.BatchNorm2d):
        super(Network, self).__init__()
        business_channel_num = config.business_channel_num

        self.backbone = resnet50(pretrained_model,
                                 inplace=inplace,
                                 norm_layer=norm_layer,
                                 bn_eps=config.bn_eps,
                                 bn_momentum=config.bn_momentum,
                                 deep_stem=True,
                                 stem_width=64)

        self.latent_layers = nn.ModuleList()
        self.refine_layers = nn.ModuleList()
        self.predict_layer = PredictHead(business_channel_num,
                                         out_planes,
                                         4,
                                         norm_layer=norm_layer)
        for idx, channel in enumerate(self.backbone.layer_channel_nums[::-1]):
            self.latent_layers.append(
                ConvBnRelu(channel,
                           business_channel_num,
                           3,
                           1,
                           1,
                           has_bn=False,
                           has_relu=False,
                           has_bias=False,
                           norm_layer=norm_layer))
            self.refine_layers.append(
                ConvBnRelu(business_channel_num,
                           business_channel_num,
                           1,
                           1,
                           0,
                           has_bn=False,
                           has_relu=False,
                           has_bias=False,
                           norm_layer=norm_layer))

        self.business_layers = [
            self.latent_layers, self.refine_layers, self.predict_layer
        ]

        self.criterion = criterion
Example #11
 def __init__(self, norm_layer=nn.BatchNorm2d):
     super(MEEM, self).__init__()
     self.toptodown = nn.Sequential(
         ConvBnRelu(2048,
                    512,
                    1,
                    1,
                    0,
                    has_bn=True,
                    has_relu=True,
                    has_bias=False,
                    norm_layer=norm_layer),
         ConvBnRelu(512,
                    256,
                    1,
                    1,
                    0,
                    has_bn=True,
                    has_relu=True,
                    has_bias=False,
                    norm_layer=norm_layer))
     self.roadfeat_down = ConvBnRelu(256,
                                     256,
                                     3,
                                     1,
                                     1,
                                     has_bn=True,
                                     has_relu=True,
                                     has_bias=False,
                                     norm_layer=norm_layer)
     self.reduce_concat = ConvBnRelu(512,
                                     256,
                                     3,
                                     1,
                                     1,
                                     has_bn=True,
                                     has_relu=True,
                                     has_bias=False,
                                     norm_layer=norm_layer)
     self.fusion_layer = ConvBnRelu(256,
                                    256,
                                    3,
                                    1,
                                    1,
                                    has_bn=True,
                                    has_relu=True,
                                    has_bias=False,
                                    norm_layer=norm_layer)
     self.score_layer = nn.Conv2d(256, 1, 3, 1, 1)
Example #12
 def __init__(self, in_planes, out_planes, norm_layer=nn.BatchNorm2d):
     super(SpatialPath, self).__init__()
     inner_channel = 64
     self.conv_7x7 = ConvBnRelu(in_planes, inner_channel, 7, 2, 3,
                                has_bn=True, norm_layer=norm_layer,
                                has_relu=True, has_bias=False)
     self.conv_3x3_1 = ConvBnRelu(inner_channel, inner_channel, 3, 2, 1,
                                  has_bn=True, norm_layer=norm_layer,
                                  has_relu=True, has_bias=False)
     self.conv_3x3_2 = ConvBnRelu(inner_channel, inner_channel, 3, 2, 1,
                                  has_bn=True, norm_layer=norm_layer,
                                  has_relu=True, has_bias=False)
     self.conv_1x1 = ConvBnRelu(inner_channel, out_planes, 1, 1, 0,
                                has_bn=True, norm_layer=norm_layer,
                                has_relu=True, has_bias=False)
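
Only the constructor is shown; a plausible forward pass for this SpatialPath simply chains the four blocks, producing a 1/8-resolution, out_planes-channel feature map (an inferred sketch, not the original code):

    def forward(self, x):
        x = self.conv_7x7(x)      # 7x7, stride 2
        x = self.conv_3x3_1(x)    # 3x3, stride 2
        x = self.conv_3x3_2(x)    # 3x3, stride 2 -> overall 1/8 resolution
        return self.conv_1x1(x)   # 1x1 projection to out_planes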
Example #13
    def __init__(self,
                 source_in_planes,
                 target_in_planes,
                 bn_eps=1e-5,
                 bn_momentum=0.1,
                 inplace=True,
                 norm_layer=nn.BatchNorm2d):
        super(InterFeatureFusion, self).__init__()
        # self.convSource = nn.Sequential(
        #     nn.Conv2d(source_in_planes, target_in_planes, 1, bias=False)
        #     norm_layer(target_in_planes, eps=bn_eps, momentum=bn_momentum)
        #     )
        # self.convTarget = nn.Sequential(
        #     nn.Conv2d(target_in_planes, target_in_planes, 1, bias=False)
        #     norm_layer(target_in_planes, eps=bn_eps, momentum=bn_momentum)
        #     )
        self.convSource1 = ConvBnRelu(source_in_planes,
                                      target_in_planes,
                                      3,
                                      1,
                                      1,
                                      has_bn=True,
                                      norm_layer=norm_layer,
                                      has_relu=True,
                                      inplace=inplace,
                                      has_bias=False)

        self.convSource2 = ConvBnRelu(target_in_planes,
                                      target_in_planes,
                                      3,
                                      1,
                                      1,
                                      has_bn=True,
                                      norm_layer=norm_layer,
                                      has_relu=True,
                                      inplace=inplace,
                                      has_bias=False)

        self.convTarget = ConvBnRelu(target_in_planes,
                                     target_in_planes,
                                     3,
                                     1,
                                     1,
                                     has_bn=True,
                                     norm_layer=norm_layer,
                                     has_relu=True,
                                     inplace=inplace,
                                     has_bias=False)
Example #14
    def __init__(self, out_planes, is_training,
                 criterion, pretrained_model=None,
                 norm_layer=nn.BatchNorm2d):
        super(conf, self).__init__()
        
        self.is_training = is_training
        self.business_layer = []

        if is_training:
            self.criterion = criterion
        
        self.encoder = resnet101(pretrained_model, norm_layer=norm_layer,
                                 bn_eps=config.bn_eps,
                                 bn_momentum=config.bn_momentum,
                                 deep_stem=True, stem_width=64)

        self.context_ff = AttentionFusion(1024, 2048, 128)
        self.spatial_conv = ConvBnRelu(256, 128, 1, 1, 0, dilation=1,
                                       has_bn=True, norm_layer=norm_layer,
                                       has_relu=True, has_bias=False)

        self.loc_conf = LocationConfidence(128+128, 1)
        
        self.refine_block = RefineOutput(128, out_planes, 4)
        self.spatial_refine_block = RefineOutput(128, out_planes, 4)
        self.context_refine_block = RefineOutput(128, out_planes, 16)

        self.business_layer.append(self.context_ff)
        self.business_layer.append(self.loc_conf)
        self.business_layer.append(self.spatial_conv)
        self.business_layer.append(self.refine_block)
        self.business_layer.append(self.spatial_refine_block)
        self.business_layer.append(self.context_refine_block)
Example #15
    def __init__(self, block, layers, channels, norm_layer=nn.BatchNorm2d):
        super(Xception, self).__init__()

        self.in_channels = 8
        self.conv1 = ConvBnRelu(3,
                                self.in_channels,
                                3,
                                2,
                                1,
                                has_bn=True,
                                norm_layer=norm_layer,
                                has_relu=True,
                                has_bias=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block,
                                       norm_layer,
                                       layers[0],
                                       channels[0],
                                       stride=2)
        self.layer2 = self._make_layer(block,
                                       norm_layer,
                                       layers[1],
                                       channels[1],
                                       stride=2)
        self.layer3 = self._make_layer(block,
                                       norm_layer,
                                       layers[2],
                                       channels[2],
                                       stride=2)
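
A matching forward-pass sketch, assuming the backbone returns the per-stage feature maps in the way a BiSeNet-style context path consumes them (see Example #7); this is an inference, not the original implementation:

    def forward(self, x):
        x = self.conv1(x)      # stride-2 stem
        x = self.maxpool(x)    # stride-2 pooling
        blocks = []
        x = self.layer1(x)
        blocks.append(x)
        x = self.layer2(x)
        blocks.append(x)
        x = self.layer3(x)
        blocks.append(x)
        return blocks          # multi-scale features for the decoder / context path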
Example #16
 def __init__(self,
              low_in_channels,
              high_in_channels,
              out_channels,
              key_channels,
              value_channels,
              dropout,
              sizes=([1]),
              norm_layer=nn.BatchNorm2d,
              psp_size=(1, 3, 6, 8)):
     super(AFNB, self).__init__()
     self.stages = []
     self.norm_type = norm_layer
     self.psp_size = psp_size
     self.stages = nn.ModuleList([
         self._make_stage([low_in_channels, high_in_channels], out_channels,
                          key_channels, value_channels, size)
         for size in sizes
     ])
     self.conv_bn_dropout = nn.Sequential(
         # nn.Conv2d(out_channels + high_in_channels, out_channels, kernel_size=1, padding=0),
         # ModuleHelper.BatchNorm2d(norm_type=self.norm_type)(out_channels),
         ConvBnRelu(out_channels + high_in_channels,
                    out_channels,
                    1,
                    1,
                    0,
                    has_bn=True,
                    norm_layer=norm_layer,
                    has_relu=True,
                    has_bias=False),
         nn.Dropout2d(dropout))
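
The constructor builds one fusion stage per pooling size plus a projection. A plausible forward pass sums the stage outputs, concatenates them with the high-level input, and applies conv_bn_dropout; _make_stage and the attention block itself are not shown, so this is only a sketch (it also assumes torch is imported):

    def forward(self, low_feats, high_feats):
        # fuse low- and high-level features with each attention stage
        priors = [stage(low_feats, high_feats) for stage in self.stages]
        context = priors[0]
        for prior in priors[1:]:
            context = context + prior
        # concatenate with the high-level input and project back to out_channels
        return self.conv_bn_dropout(torch.cat([context, high_feats], dim=1))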
Example #17
    def __init__(self,
                 in_channels,
                 out_channels,
                 expansion,
                 stride,
                 dilation=1,
                 norm_layer=nn.BatchNorm2d):
        super(Block, self).__init__()
        self.use_res_connect = (stride == 1)
        self.in_channels = in_channels

        if stride == 1:
            in_channels = in_channels // 2
            mid_channels = round(in_channels * expansion)
            out_channels = in_channels
        elif stride == 2:
            mid_channels = out_channels
        self.residual_branch = nn.Sequential(
            ConvBnRelu(in_channels,
                       mid_channels,
                       3,
                       1,
                       1,
                       dilation,
                       has_relu=True,
                       norm_layer=norm_layer),
            SeparableConvBnRelu(mid_channels,
                                out_channels,
                                3,
                                stride,
                                dilation,
                                dilation,
                                has_relu=False,
                                norm_layer=norm_layer))
Example #18
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=1,
                 stride=1,
                 padding=0,
                 dilation=1,
                 has_relu=True,
                 norm_layer=nn.BatchNorm2d):
        super(SeparableConvBnRelu, self).__init__()

        self.conv1 = nn.Conv2d(in_channels,
                               in_channels,
                               kernel_size,
                               stride,
                               padding,
                               dilation,
                               groups=in_channels,
                               bias=False)
        self.point_wise_cbr = ConvBnRelu(in_channels,
                                         out_channels,
                                         1,
                                         1,
                                         0,
                                         has_bn=True,
                                         norm_layer=norm_layer,
                                         has_relu=has_relu,
                                         has_bias=False)
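
The forward pass is presumably just the depthwise convolution followed by the pointwise ConvBnRelu (a sketch based on the constructor above):

    def forward(self, x):
        x = self.conv1(x)              # depthwise conv (groups == in_channels)
        return self.point_wise_cbr(x)  # 1x1 pointwise conv + BN (+ optional ReLU)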
Example #19
    def __init__(self, in_channels, size, norm_layer=nn.BatchNorm2d):
        super(ObjectContextBranch, self).__init__()
        self.in_channels = in_channels
        # self.inner_channel = inner_channel
        self.size = size
        self.point_num = size**2

        self.pool = nn.AdaptiveAvgPool2d(self.size)
        self.intra_similarity_branch = nn.Sequential(
            ConvBnRelu(self.in_channels,
                       self.in_channels,
                       1,
                       1,
                       0,
                       has_bn=True,
                       has_relu=True,
                       has_bias=False,
                       norm_layer=norm_layer),
            ConvBnRelu(self.in_channels,
                       self.point_num,
                       1,
                       1,
                       0,
                       has_bn=True,
                       has_relu=False,
                       has_bias=False,
                       norm_layer=norm_layer),
        )

        self.intra_post_conv = ConvBnRelu(self.in_channels,
                                          self.in_channels,
                                          1,
                                          1,
                                          0,
                                          has_bn=True,
                                          has_relu=True,
                                          has_bias=False,
                                          norm_layer=norm_layer)
        self.inter_post_conv = ConvBnRelu(self.in_channels,
                                          self.in_channels,
                                          1,
                                          1,
                                          0,
                                          has_bn=True,
                                          has_relu=True,
                                          has_bias=False,
                                          norm_layer=norm_layer)
Example #20
 def __init__(self, norm_layer=nn.BatchNorm2d):
     super(FPN, self).__init__()
     self.start_level = 0
     self.backbone_end_level = 4
     in_channels = [256, 512, 1024, 2048]
     out_channel = 256
     self.dila_FCN = True
     self.lateral_convs = nn.ModuleList()
     self.fpn_convs = nn.ModuleList()
     for i in range(self.start_level, self.backbone_end_level):
         l_conv = ConvBnRelu(in_channels[i], out_channel, 1, 1, 0, 1,
                             has_bn=True, inplace=False,
                             has_relu=True, has_bias=False, norm_layer=norm_layer)
         fpn_conv = ConvBnRelu(out_channel, out_channel, 3, 1, 1, 1,
                               has_bn=True, inplace=False,
                               has_relu=True, has_bias=False, norm_layer=norm_layer)
         self.lateral_convs.append(l_conv)
         self.fpn_convs.append(fpn_conv)
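
A sketch of the standard FPN top-down forward pass that these lateral/output convs would support, assuming the four backbone maps arrive in stride-4 to stride-32 order and that F is torch.nn.functional (inferred; the behaviour of the dila_FCN flag is not shown):

    def forward(self, features):
        # 1x1 lateral projections of the backbone features to 256 channels
        laterals = [l_conv(f) for l_conv, f in zip(self.lateral_convs, features)]
        # top-down pathway: upsample the coarser map and add it to the finer one
        for i in range(len(laterals) - 1, 0, -1):
            laterals[i - 1] = laterals[i - 1] + F.interpolate(
                laterals[i], size=laterals[i - 1].shape[2:], mode='nearest')
        # 3x3 smoothing conv on every merged map
        return [fpn_conv(l) for fpn_conv, l in zip(self.fpn_convs, laterals)]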
Example #21
 def __init__(self, in_planes, out_planes, scale=1, norm_layer=nn.BatchNorm2d):
     super(RefineOutput, self).__init__()
     self.conv_3x3 = ConvBnRelu(in_planes, 64, 3, 1, 1,
                                    has_bn=True, norm_layer=norm_layer,
                                    has_relu=True, has_bias=False)
     # self.dropout = nn.Dropout(0.1)
     self.conv_1x1 = nn.Conv2d(64, out_planes, kernel_size=1,
                                   stride=1, padding=0)
     self.scale = scale
Example #22
    def __init__(self, out_planes, criterion, inplace=True,
                 pretrained_model=None, norm_layer=nn.BatchNorm2d):
        super(Network, self).__init__()
        business_channel_num = config.business_channel_num
        embed_channel_num = config.embed_channel_num

        self.backbone = resnet101(pretrained_model, inplace=inplace,
                                  norm_layer=norm_layer,
                                  bn_eps=config.bn_eps,
                                  bn_momentum=config.bn_momentum,
                                  deep_stem=True, stem_width=64)
        block_channel_nums = self.backbone.layer_channel_nums

        self.latent_layers = nn.ModuleList()
        self.refine_layers = nn.ModuleList()
        self.embed_layers = nn.ModuleList()
        self.mst_layers = nn.ModuleList()
        self.tree_filter_layers = nn.ModuleList()
        self.global_context = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            ConvBnRelu(block_channel_nums[-1], business_channel_num,
                       1, 1, 0, has_bn=True, has_relu=True, has_bias=False,
                       norm_layer=norm_layer))
        self.predict_layer = PredictHead(business_channel_num, out_planes, 4, norm_layer=norm_layer)
        for idx, channel in enumerate(block_channel_nums[::-1]):
            self.latent_layers.append(
                RefineResidual(channel, business_channel_num, 3,
                               norm_layer=norm_layer, has_relu=True)
            )
            self.refine_layers.append(
                RefineResidual(business_channel_num, business_channel_num, 3,
                               norm_layer=norm_layer, has_relu=True)
            )
            self.embed_layers.append(
                ConvBnRelu(business_channel_num, embed_channel_num, 1, 1, 0, has_bn=False,
                           has_relu=False, has_bias=False, norm_layer=norm_layer))
            self.mst_layers.append(MinimumSpanningTree(TreeFilter2D.norm2_distance))
            self.tree_filter_layers.append(TreeFilter2D(groups=config.tree_filter_group_num))

        self.business_layers = [self.global_context, self.latent_layers, self.refine_layers,
                                self.predict_layer, self.embed_layers, self.mst_layers,
                                self.tree_filter_layers]

        self.criterion = criterion
Example #23
 def __init__(self, in_planes, out_planes, scale,
              is_aux=False, norm_layer=nn.BatchNorm2d):
     super(BiSeNetHead, self).__init__()
     if is_aux:
         self.conv_3x3 = ConvBnRelu(in_planes, 128, 3, 1, 1,
                                    has_bn=True, norm_layer=norm_layer,
                                    has_relu=True, has_bias=False)
     else:
         self.conv_3x3 = ConvBnRelu(in_planes, 64, 3, 1, 1,
                                    has_bn=True, norm_layer=norm_layer,
                                    has_relu=True, has_bias=False)
     # self.dropout = nn.Dropout(0.1)
     if is_aux:
         self.conv_1x1 = nn.Conv2d(128, out_planes, kernel_size=1,
                                   stride=1, padding=0)
     else:
         self.conv_1x1 = nn.Conv2d(64, out_planes, kernel_size=1,
                                   stride=1, padding=0)
     self.scale = scale
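
The head is presumably applied as 3x3 conv, 1x1 classifier, then bilinear upsampling by self.scale; a sketch assuming F is torch.nn.functional:

    def forward(self, x):
        x = self.conv_3x3(x)
        output = self.conv_1x1(x)
        if self.scale > 1:
            output = F.interpolate(output, scale_factor=self.scale,
                                   mode='bilinear', align_corners=True)
        return output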
Example #24
 def __init__(self,
              in_planes,
              out_planes,
              scale,
              is_aux=False,
              norm_layer=nn.BatchNorm2d,
              alpha=1):
     super(FPNetHead, self).__init__()
     if is_aux:
         self.conv_3x3 = ConvBnRelu(in_planes,
                                    int(np.rint(256 * alpha)),
                                    3,
                                    1,
                                    1,
                                    has_bn=True,
                                    norm_layer=norm_layer,
                                    has_relu=True,
                                    has_bias=False)
     else:
         self.conv_3x3 = ConvBnRelu(in_planes,
                                    int(np.rint(64 * alpha)),
                                    3,
                                    1,
                                    1,
                                    has_bn=True,
                                    norm_layer=norm_layer,
                                    has_relu=True,
                                    has_bias=False)
     # self.dropout = nn.Dropout(0.1)
     if is_aux:
         self.conv_1x1 = nn.Conv2d(int(np.rint(256 * alpha)),
                                   out_planes,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
     else:
         self.conv_1x1 = nn.Conv2d(int(np.rint(64 * alpha)),
                                   out_planes,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
     self.scale = scale
Example #25
 def __init__(self, in_channels, norm_layer=nn.BatchNorm2d):
     super(ContextEmbedding, self).__init__()
     self.pooling = nn.AdaptiveAvgPool2d(1)
     self.bn = norm_layer(in_channels)
     self.point_wise_cbr = ConvBnRelu(in_channels,
                                      in_channels,
                                      1,
                                      1,
                                      0,
                                      has_bn=True,
                                      norm_layer=norm_layer,
                                      has_relu=False,
                                      has_bias=False)
Example #26
    def __init__(self, name, out_planes, fc_dim=4096, pool_scales=[1, 2, 3, 6],
                 norm_layer=nn.BatchNorm2d):
        super(PyramidPooling, self).__init__()

        self.ppm = []
        for scale in pool_scales:
            self.ppm.append(nn.Sequential(OrderedDict([
                ('{}/pool_1'.format(name), nn.AdaptiveAvgPool2d(scale)),
                ('{}/cbr'.format(name),
                 ConvBnRelu(fc_dim, 512, 1, 1, 0, has_bn=True,
                            has_relu=True, has_bias=False,
                            norm_layer=norm_layer))
            ])))
        self.ppm = nn.ModuleList(self.ppm)

        self.conv6 = nn.Sequential(
            ConvBnRelu(fc_dim + len(pool_scales) * 512, 512, 3, 1, 1,
                       has_bn=True,
                       has_relu=True, has_bias=False, norm_layer=norm_layer),
            nn.Dropout2d(0.1, inplace=False),
            nn.Conv2d(512, out_planes, kernel_size=1)
        )
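
A sketch of the usual PSP-style forward pass for this head: each pooled branch is upsampled back to the input resolution, concatenated with the input, and classified by conv6 (inferred; the original may handle align_corners differently, and the sketch assumes torch and F = torch.nn.functional are imported):

    def forward(self, x):
        input_size = x.shape[2:]
        ppm_out = [x]
        for pooled_branch in self.ppm:
            ppm_out.append(
                F.interpolate(pooled_branch(x), size=input_size,
                              mode='bilinear', align_corners=True))
        ppm_out = torch.cat(ppm_out, dim=1)   # fc_dim + len(pool_scales) * 512 channels
        return self.conv6(ppm_out)            # per-pixel class scores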
Example #27
 def __init__(self, norm_layer=nn.BatchNorm2d):
     super(CAM1, self).__init__()
     self.collect_reduction = ConvBnRelu(2048,
                                         256,
                                         3,
                                         1,
                                         1,
                                         has_bn=True,
                                         has_relu=True,
                                         has_bias=False,
                                         norm_layer=norm_layer)
     self.query = ConvBnRelu(256,
                             64,
                             1,
                             1,
                             0,
                             has_bn=False,
                             has_relu=False,
                             has_bias=False,
                             norm_layer=norm_layer)
     self.key = ConvBnRelu(256,
                           64,
                           1,
                           1,
                           0,
                           has_bn=False,
                           has_relu=False,
                           has_bias=False,
                           norm_layer=norm_layer)
     self.value = ConvBnRelu(256,
                             256,
                             1,
                             1,
                             0,
                             has_bn=False,
                             has_relu=False,
                             has_bias=False,
                             norm_layer=norm_layer)
Example #28
    def __init__(self,
                 inplanes,
                 planes,
                 scale=1,
                 bn_eps=1e-5,
                 bn_momentum=0.1,
                 inplace=True,
                 norm_layer=nn.BatchNorm2d):
        super(InterFeatureDownsample, self).__init__()
        # self.downsamplelayers = []
        # for i in range(channel):
        #     self.downsamplelayers.append(ConvBnRelu(inplanes, inplanes*2, 3, 1, 1,
        #                       has_bn=True, norm_layer=norm_layer,
        #                       has_relu=True, inplace=inplace, has_bias=False))
        #     inplanes = inplanes*2
        # self.downsamplelayers = nn.Sequential(*self.downsamplelayers)
        self.scale = scale
        self.downsamplelayers = ConvBnRelu(inplanes,
                                           planes,
                                           3,
                                           1,
                                           1,
                                           has_bn=True,
                                           norm_layer=norm_layer,
                                           has_relu=True,
                                           inplace=inplace,
                                           has_bias=False)

        self.convTimes = ConvBnRelu(planes,
                                    planes,
                                    3,
                                    1,
                                    1,
                                    has_bn=True,
                                    norm_layer=norm_layer,
                                    has_relu=True,
                                    inplace=inplace,
                                    has_bias=False)
Example #29
    def __init__(self, name, in_channels=4096, pool_scales=[1, 2, 3, 6],
                 norm_layer=nn.BatchNorm2d):
        super(PyramidPooling, self).__init__()

        self.ppm = []
        for scale in pool_scales:
            self.ppm.append(nn.Sequential(OrderedDict([
                ('{}/pool_1'.format(name), nn.AdaptiveAvgPool2d(scale)),
                ('{}/cbr'.format(name),
                 ConvBnRelu(in_channels, 512, 1, 1, 0, has_bn=True,
                            has_relu=True, has_bias=False,
                            norm_layer=norm_layer))
            ])))
        self.ppm = nn.ModuleList(self.ppm)
Example #30
 def __init__(self, inplane, outplane, scale=4, norm_layer=nn.BatchNorm2d):
     super(deeperlab_seg_head, self).__init__()
     self.conv = ConvBnRelu(inplane,
                            256,
                            7,
                            1,
                            3,
                            norm_layer=norm_layer,
                            bn_eps=config.bn_eps)
     self.conv_seg = nn.Conv2d(256,
                               outplane,
                               kernel_size=1,
                               stride=1,
                               padding=0)
     self.scale = scale
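
A matching forward sketch for this segmentation head: 7x7 conv, 1x1 classifier, then upsampling by self.scale (an assumption; the original DeeperLab head may upsample elsewhere in the network). It assumes F is torch.nn.functional:

    def forward(self, x):
        x = self.conv(x)
        x = self.conv_seg(x)
        if self.scale > 1:
            x = F.interpolate(x, scale_factor=self.scale,
                              mode='bilinear', align_corners=True)
        return x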