Example 1: _AttentionFusionModule
 def __init__(self, channels=128, norm_layer=None, norm_kwargs=None):
     super(_AttentionFusionModule, self).__init__()
     with self.name_scope():
         self.conv3x3_high = ConvModule2d(channels,
                                          3,
                                          1,
                                          1,
                                          norm_layer=norm_layer,
                                          norm_kwargs=norm_kwargs)
         self.conv3x3_low = ConvModule2d(channels,
                                         3,
                                         1,
                                         1,
                                         norm_layer=norm_layer,
                                         norm_kwargs=norm_kwargs)
         self.conv1x1_1 = ConvModule2d(channels,
                                       1,
                                       norm_layer=norm_layer,
                                       norm_kwargs=norm_kwargs)
         self.conv1x1_2 = ConvModule2d(channels,
                                       1,
                                       norm_layer=norm_layer,
                                       norm_kwargs=norm_kwargs,
                                       activation='sigmoid')
         self.gap = nn.GlobalAvgPool2D()
Example 2: _DFPModule
 def __init__(self,
              nclass,
              channels,
              norm_layer=nn.BatchNorm,
              norm_kwargs=None):
     super(_DFPModule, self).__init__()
     with self.name_scope():
         self.blk_4 = ConvModule2d(channels,
                                   3,
                                   1,
                                   1,
                                   norm_layer=norm_layer,
                                   norm_kwargs=norm_kwargs)
         self.blk_3 = ConvModule2d(channels,
                                   3,
                                   1,
                                   1,
                                   norm_layer=norm_layer,
                                   norm_kwargs=norm_kwargs)
         self.blk_2 = ConvModule2d(channels,
                                   3,
                                   1,
                                   1,
                                   norm_layer=norm_layer,
                                   norm_kwargs=norm_kwargs)
         self.blk_1 = ConvModule2d(channels,
                                   3,
                                   1,
                                   1,
                                   norm_layer=norm_layer,
                                   norm_kwargs=norm_kwargs)
         self.head = FCNHead(nclass,
                             channels * 5,
                             norm_layer=norm_layer,
                             norm_kwargs=norm_kwargs)
Example 3: _CAHead
 def __init__(self, nclass, capacity=512, attention=False, drop=.1, norm_layer=nn.BatchNorm,
              norm_kwargs=None, height=120, width=120):
     super(_CAHead, self).__init__()
     self.up_kwargs = {'height': height, 'width': width}
     self.attention = attention
     self.gamma = 1.0
     height = height // 2
     width = width // 2
     with self.name_scope():
         # Chained Context Aggregation Module
         self.gp = GlobalFlow(capacity, 2048, norm_layer, norm_kwargs)
         self.cp1 = _ContextFlow(capacity, stride=2, norm_layer=norm_layer,
                                 norm_kwargs=norm_kwargs, height=height, width=width)
         self.cp2 = _ContextFlow(capacity, stride=4, norm_layer=norm_layer,
                                 norm_kwargs=norm_kwargs, height=height, width=width)
         self.cp3 = _ContextFlow(capacity, stride=8, norm_layer=norm_layer,
                                 norm_kwargs=norm_kwargs, height=height, width=width)
         self.cp4 = _ContextFlow(capacity, stride=16, norm_layer=norm_layer,
                                 norm_kwargs=norm_kwargs, height=height, width=width)
         if self.attention:
             self.selection = _FeatureSelection(256, in_channels=capacity, norm_layer=norm_layer,
                                                norm_kwargs=norm_kwargs)
         else:
             self.proj = ConvModule2d(256, 3, 1, 1, in_channels=capacity, norm_layer=norm_layer,
                                      norm_kwargs=norm_kwargs)
         self.drop = nn.Dropout(drop) if drop else None
         # decoder
         self.decoder = ConvModule2d(48, 3, 1, 1, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
         self.conv3x3 = ConvModule2d(256, 3, 1, 1, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
         # segmentation head
         self.seg_head = FCNHead(nclass, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
Example 4: _make_layer
 def _make_layer(stages,
                 channels,
                 norm_layer=nn.BatchNorm,
                 norm_kwargs=None):
     # scale: 1/4 --> 1/4
     if stages == 0:
         layer = ConvModule2d(channels,
                              3,
                              1,
                              1,
                              norm_layer=norm_layer,
                              norm_kwargs=norm_kwargs)
         return layer
     # n = 2 ^ stages
     # scale: 1/(4 * n) --> 1/4
     layer = HybridSequentialUpscale()
     for _ in range(stages):
         layer.add(
             ConvModule2d(channels,
                          3,
                          1,
                          1,
                          norm_layer=norm_layer,
                          norm_kwargs=norm_kwargs))
     return layer
Example 5: _SpatialPath
 def __init__(self,
              channels,
              inter_channels=64,
              norm_layer=nn.BatchNorm,
              norm_kwargs=None):
     super(_SpatialPath, self).__init__()
     with self.name_scope():
         self.conv7x7 = ConvModule2d(inter_channels,
                                     7,
                                     2,
                                     3,
                                     norm_layer=norm_layer,
                                     norm_kwargs=norm_kwargs)
         self.conv3x3_1 = ConvModule2d(inter_channels,
                                       3,
                                       2,
                                       1,
                                       norm_layer=norm_layer,
                                       norm_kwargs=norm_kwargs)
         self.conv3x3_2 = ConvModule2d(inter_channels,
                                       3,
                                       2,
                                       1,
                                       norm_layer=norm_layer,
                                       norm_kwargs=norm_kwargs)
         self.conv1x1 = ConvModule2d(channels,
                                     1,
                                     norm_layer=norm_layer,
                                     norm_kwargs=norm_kwargs)
Example 6: _DeepLabHead
 def __init__(self,
              nclass,
              in_channels,
              norm_layer=nn.BatchNorm,
              norm_kwargs=None):
     super(_DeepLabHead, self).__init__()
     with self.name_scope():
         self.aspp = ASPPModule(256,
                                in_channels,
                                norm_layer,
                                norm_kwargs,
                                rates=(12, 24, 36))
         self.conv_c1 = ConvModule2d(48,
                                     3,
                                     1,
                                     1,
                                     norm_layer=norm_layer,
                                     norm_kwargs=norm_kwargs)
         self.conv3x3 = ConvModule2d(256,
                                     3,
                                     1,
                                     1,
                                     in_channels=304,
                                     norm_layer=norm_layer,
                                     norm_kwargs=norm_kwargs)
         self.drop = nn.Dropout(0.5)
         self.head = FCNHead(nclass, 256, norm_layer, norm_kwargs)
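Since conv3x3 declares in_channels=304 = 256 + 48, the decoder evidently concatenates the ASPP output with the projected low-level feature, following DeepLabV3+. A sketch of the assumed forward pass; the BilinearResize2D upsampling call is an assumption, the original may pass explicit height/width instead:

 def hybrid_forward(self, F, x, c1):
     x = self.aspp(x)
     # upsample to the low-level feature's spatial size (assumed resize op)
     x = F.contrib.BilinearResize2D(x, like=c1, mode='like')
     x = self.conv3x3(F.concat(x, self.conv_c1(c1), dim=1))  # 256 + 48 = 304
     return self.head(self.drop(x))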
Example 7: _FeatureSelection
 def __init__(self, channels, in_channels, norm_layer=nn.BatchNorm, norm_kwargs=None):
     super(_FeatureSelection, self).__init__()
     with self.name_scope():
         self.conv3x3 = ConvModule2d(channels, 3, 1, 1, in_channels=in_channels,
                                     norm_layer=norm_layer, norm_kwargs=norm_kwargs)
         self.gap = nn.GlobalAvgPool2D()
         self.conv1x1 = ConvModule2d(channels, 1, in_channels=channels, norm_layer=norm_layer,
                                     norm_kwargs=norm_kwargs, activation='sigmoid')
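The layout (3x3 projection, global pooling, sigmoid-gated 1x1) reads as channel-wise feature selection. A sketch assuming the common residual gating out = x + x * w; the module might equally return the plain product:

 def hybrid_forward(self, F, x):
     x = self.conv3x3(x)
     # per-channel sigmoid weights from the pooled descriptor
     w = self.conv1x1(self.gap(x))
     return F.broadcast_add(x, F.broadcast_mul(x, w))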
Example 8: _ContextFlowShuffle
 def __init__(self, channels, stride, groups=4, norm_layer=nn.BatchNorm,
              norm_kwargs=None, height=60, width=60):
     super(_ContextFlowShuffle, self).__init__()
     self.stride = stride
     self.groups = groups
     self.up_kwargs = {'height': height, 'width': width}
     with self.name_scope():
         self.conv1 = ConvModule2d(channels, 3, 1, 1, groups=groups, norm_layer=norm_layer,
                                   norm_kwargs=norm_kwargs, activation='relu')
         self.conv2 = ConvModule2d(channels, 3, 1, 1, groups=groups, norm_layer=norm_layer,
                                   norm_kwargs=norm_kwargs, activation='relu')
Example 9: _make_fusion
 def _make_fusion(channels=256, norm_layer=nn.BatchNorm, norm_kwargs=None):
     fusion = nn.HybridSequential()
     fusion.add(
         ConvModule2d(channels,
                      3,
                      1,
                      1,
                      norm_layer=norm_layer,
                      norm_kwargs=norm_kwargs),
         ConvModule2d(channels,
                      3,
                      1,
                      1,
                      norm_layer=norm_layer,
                      norm_kwargs=norm_kwargs))
     return fusion
Example 10: _ACFModule
 def __init__(self,
              channels,
              in_channels,
              norm_layer=nn.BatchNorm,
              norm_kwargs=None):
     super(_ACFModule, self).__init__()
     with self.name_scope():
         self.conv_1 = ConvModule2d(channels,
                                    1,
                                    in_channels=in_channels,
                                    norm_layer=norm_layer,
                                    norm_kwargs=norm_kwargs)
         self.conv_2 = ConvModule2d(channels,
                                    1,
                                    in_channels=in_channels,
                                    norm_layer=norm_layer,
                                    norm_kwargs=norm_kwargs)
Example 11: _SegHead
 def __init__(self, nclass, norm_layer=nn.BatchNorm, norm_kwargs=None):
     super(_SegHead, self).__init__()
     with self.name_scope():
         self.conv1 = ConvModule2d(256,
                                   1,
                                   norm_layer=norm_layer,
                                   norm_kwargs=norm_kwargs)
         self.conv2 = nn.Conv2D(nclass, 1, in_channels=256)
Example 12: _LateralFusion
 def __init__(self,
              capacity,
              norm_layer=nn.BatchNorm,
              norm_kwargs=None,
              is_final=False):
     super(_LateralFusion, self).__init__()
     self.is_final = is_final
     with self.name_scope():
         self.conv1x1 = ConvModule2d(capacity,
                                     1,
                                     norm_layer=norm_layer,
                                     norm_kwargs=norm_kwargs)
         self.conv3x3 = ConvModule2d(capacity,
                                     3,
                                     1,
                                     1,
                                     norm_layer=norm_layer,
                                     norm_kwargs=norm_kwargs)
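A lateral fusion step of this shape usually upsamples the deeper feature, adds the 1x1-projected skip, and refines with the 3x3 conv; is_final plausibly drops the skip path. One possible reading, with the resize call assumed:

 def hybrid_forward(self, F, x, skip=None):
     if self.is_final:
         return self.conv3x3(self.conv1x1(x))
     # match the skip's spatial size, then add the projected skip
     x = F.contrib.BilinearResize2D(x, like=skip, mode='like')
     return self.conv3x3(x + self.conv1x1(skip))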
Example 13: _DANetHead
    def __init__(self, nclass, in_channels, norm_layer=nn.BatchNorm, norm_kwargs=None):
        super(_DANetHead, self).__init__()
        inter_channels = in_channels // 4
        with self.name_scope():
            self.compress_pam = ConvModule2d(inter_channels, 3, 1, 1, in_channels=in_channels,
                                             norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            self.pam = SelfAttentionModule(inter_channels)
            self.proj_pam = ConvModule2d(inter_channels, 3, 1, 1, in_channels=inter_channels,
                                         norm_layer=norm_layer, norm_kwargs=norm_kwargs)

            self.compress_cam = ConvModule2d(inter_channels, 3, 1, 1, in_channels=in_channels,
                                             norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            self.cam = _CAModule(inter_channels)
            self.proj_cam = ConvModule2d(inter_channels, 3, 1, 1, in_channels=inter_channels,
                                         norm_layer=norm_layer, norm_kwargs=norm_kwargs)

            self.head = nn.HybridSequential()
            self.head.add(nn.Dropout(0.1))
            self.head.add(nn.Conv2D(nclass, 1, in_channels=inter_channels))
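The branch structure matches the DANet paper: position attention and channel attention computed in parallel, each re-projected, then summed before the dropout + 1x1 classifier. Assuming the head follows that design:

    def hybrid_forward(self, F, x):
        # position-attention branch
        feat_p = self.proj_pam(self.pam(self.compress_pam(x)))
        # channel-attention branch
        feat_c = self.proj_cam(self.cam(self.compress_cam(x)))
        return self.head(feat_p + feat_c)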
Example 14: _MPUnit
 def __init__(self,
              channels,
              atrous_rates,
              in_channels,
              norm_layer=nn.BatchNorm,
              norm_kwargs=None,
              activation='prelu',
              light=False,
              **kwargs):
     super(_MPUnit, self).__init__()
     with self.name_scope():
         self.concurrent = HybridConcurrent(
             axis=1) if not light else HybridConcurrentSum()
          for rate in atrous_rates:
             self.concurrent.add(
                 ConvModule2d(channels,
                              3,
                              1,
                              padding=rate,
                              dilation=rate,
                              groups=in_channels,
                              in_channels=in_channels,
                              norm_layer=norm_layer,
                              norm_kwargs=norm_kwargs,
                              activation=activation))
         if not light:
             self.concurrent.add(
                 ConvModule2d(channels,
                              1,
                              in_channels=in_channels,
                              norm_layer=norm_layer,
                              norm_kwargs=norm_kwargs,
                              activation=activation))
             self.conv1x1 = ConvModule2d(channels,
                                         1,
                                         norm_layer=norm_layer,
                                         norm_kwargs=norm_kwargs,
                                         activation=activation)
         else:
             self.conv1x1 = None
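The forward pass follows from the construction: run the parallel dilated branches (channel-concatenated, or summed in the light variant) and merge with the trailing 1x1 when present. A sketch:

 def hybrid_forward(self, F, x):
     out = self.concurrent(x)
     # the light variant sums branches, so no channel merge is needed
     return out if self.conv1x1 is None else self.conv1x1(out)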
Example 15: _AttaNetHead
 def __init__(self, nclass, norm_layer=nn.BatchNorm, norm_kwargs=None):
     super(_AttaNetHead, self).__init__()
     with self.name_scope():
         self.afm = _AttentionFusionModule(128, norm_layer, norm_kwargs)
         self.conv3x3 = ConvModule2d(128,
                                     3,
                                     1,
                                     1,
                                     norm_layer=norm_layer,
                                     norm_kwargs=norm_kwargs)
         self.sam = _StripAttentionModule(128, norm_layer, norm_kwargs)
         self.seg = FCNHead(nclass, 128, norm_layer, norm_kwargs)
Example 16: _FFModule
 def __init__(self,
              channels,
              norm_layer=nn.BatchNorm,
              norm_kwargs=None,
              reduction=1):
     super(_FFModule, self).__init__()
     with self.name_scope():
         self.proj = ConvModule2d(channels,
                                  1,
                                  norm_layer=norm_layer,
                                  norm_kwargs=norm_kwargs)
         self.gvp = nn.GlobalAvgPool2D()
         self.conv1x1_1 = ConvModule2d(channels // reduction,
                                       1,
                                       norm_layer=norm_layer,
                                       norm_kwargs=norm_kwargs)
         self.conv1x1_2 = ConvModule2d(channels,
                                       1,
                                       norm_layer=norm_layer,
                                       norm_kwargs=norm_kwargs,
                                       activation='sigmoid')
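This is the BiSeNet feature-fusion module: concatenate the two paths, project with the 1x1 conv, then apply channel attention as a residual. A sketch assuming the paper's formulation out = feat + feat * atten:

 def hybrid_forward(self, F, sp, cx):
     feat = self.proj(F.concat(sp, cx, dim=1))
     # channel attention from the pooled descriptor
     atten = self.conv1x1_2(self.conv1x1_1(self.gvp(feat)))
     return F.broadcast_add(feat, F.broadcast_mul(feat, atten))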
Example 17: _PUPHead
 def __init__(self, nclass, aux, norm_layer=nn.BatchNorm, norm_kwargs=None):
     super(_PUPHead, self).__init__()
     self.aux = aux
     with self.name_scope():
         self.conv0 = ConvModule2d(256,
                                   3,
                                   1,
                                   1,
                                   norm_layer=norm_layer,
                                   norm_kwargs=norm_kwargs)
         self.conv1 = ConvModule2d(256,
                                   3,
                                   1,
                                   1,
                                   norm_layer=norm_layer,
                                   norm_kwargs=norm_kwargs)
         self.conv2 = ConvModule2d(256,
                                   3,
                                   1,
                                   1,
                                   norm_layer=norm_layer,
                                   norm_kwargs=norm_kwargs)
         self.conv3 = ConvModule2d(256,
                                   3,
                                   1,
                                   1,
                                   norm_layer=norm_layer,
                                   norm_kwargs=norm_kwargs)
         self.conv4 = ConvModule2d(nclass,
                                   3,
                                   1,
                                   1,
                                   norm_layer=norm_layer,
                                   norm_kwargs=norm_kwargs)
         if self.aux:
             self.aux_head = HybridConcurrentIsolate()
             self.aux_head.add(_SegHead(nclass, norm_layer, norm_kwargs),
                               _SegHead(nclass, norm_layer, norm_kwargs),
                               _SegHead(nclass, norm_layer, norm_kwargs),
                               _SegHead(nclass, norm_layer, norm_kwargs))
Example 18: Block
    def __init__(self,
                 in_channels,
                 mid_channels,
                 strides,
                 dilation=1,
                 norm_layer=nn.BatchNorm,
                 norm_kwargs=None,
                 activation='relu'):
        super(Block, self).__init__()
        if strides > 1:
            self.down = ConvModule2d(mid_channels * self.expansion,
                                     1,
                                     strides=strides,
                                     use_bias=False,
                                     in_channels=in_channels,
                                     norm_layer=norm_layer,
                                     norm_kwargs=norm_kwargs,
                                     activation=None)
        else:
            self.down = None

        self.residual = nn.HybridSequential()
        self.residual.add(
            DepthwiseSeparableConv2d(mid_channels,
                                     in_channels,
                                     3,
                                     strides,
                                     dilation,
                                     dilation,
                                     norm_layer=norm_layer,
                                     norm_kwargs=norm_kwargs,
                                     activation=activation,
                                     pattern='xception'),
            DepthwiseSeparableConv2d(mid_channels,
                                     mid_channels,
                                     3,
                                     1,
                                     1,
                                     norm_layer=norm_layer,
                                     norm_kwargs=norm_kwargs,
                                     activation=activation,
                                     pattern='xception'),
            DepthwiseSeparableConv2d(mid_channels * self.expansion,
                                     mid_channels,
                                     3,
                                     1,
                                     1,
                                     norm_layer=norm_layer,
                                     norm_kwargs=norm_kwargs,
                                     activation=None,
                                     pattern='xception'))
        self.act = Activation(activation)
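The residual wiring is determined by the declarations: three depthwise-separable convs on the main path, a strided 1x1 projection on the identity path when downsampling, and an activation after the sum. Assuming the usual residual addition:

    def hybrid_forward(self, F, x):
        identity = x if self.down is None else self.down(x)
        return self.act(self.residual(x) + identity)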
Example 19: _StripAttentionModule
 def __init__(self,
              in_channels,
              norm_layer=None,
              norm_kwargs=None,
              reduction=2):
     super(_StripAttentionModule, self).__init__()
     with self.name_scope():
         self.query_conv = ConvModule2d(in_channels // reduction,
                                        1,
                                        in_channels=in_channels,
                                        norm_layer=norm_layer,
                                        norm_kwargs=norm_kwargs)
         self.key_conv = ConvModule2d(in_channels // reduction,
                                      1,
                                      in_channels=in_channels,
                                      norm_layer=norm_layer,
                                      norm_kwargs=norm_kwargs)
         self.value_conv = ConvModule2d(in_channels,
                                        1,
                                        in_channels=in_channels,
                                        norm_layer=norm_layer,
                                        norm_kwargs=norm_kwargs)
Example 20: _BoundaryAttention
 def __init__(self,
              nclass,
              low_channels=256,
              high_channels=128,
              norm_layer=nn.BatchNorm,
              norm_kwargs=None,
              drop=.1):
     super(_BoundaryAttention, self).__init__()
     with self.name_scope():
         self.conv1x1 = ConvModule2d(low_channels,
                                     1,
                                     in_channels=high_channels,
                                     norm_layer=norm_layer,
                                     norm_kwargs=norm_kwargs,
                                     activation='sigmoid')
         self.fconv1x1 = ConvModule2d(high_channels,
                                      1,
                                      in_channels=low_channels,
                                      norm_layer=norm_layer,
                                      norm_kwargs=norm_kwargs)
         self.fconv3x3 = ConvModule2d(high_channels,
                                      3,
                                      1,
                                      1,
                                      in_channels=high_channels,
                                      norm_layer=norm_layer,
                                      norm_kwargs=norm_kwargs)
         self.cconv3x3 = ConvModule2d(high_channels,
                                      3,
                                      1,
                                      1,
                                      in_channels=high_channels,
                                      norm_layer=norm_layer,
                                      norm_kwargs=norm_kwargs)
         self.drop = nn.Dropout(drop) if drop else None
         self.cconv1x1 = nn.Conv2D(nclass, 1, in_channels=high_channels)
Example 21: _SwiftNetHead
 def __init__(self,
              nclass,
              in_channels,
              capacity=256,
              norm_layer=nn.BatchNorm,
              norm_kwargs=None):
     super(_SwiftNetHead, self).__init__()
     with self.name_scope():
         self.ppool = PPModule(in_channels, norm_layer, norm_kwargs)
         self.conv_c4 = ConvModule2d(capacity,
                                     1,
                                     norm_layer=norm_layer,
                                     norm_kwargs=norm_kwargs)
         self.fusion_c3 = LateralFusion(capacity, norm_layer, norm_kwargs)
         self.fusion_c2 = LateralFusion(capacity, norm_layer, norm_kwargs)
         self.fusion_c1 = LateralFusion(capacity, norm_layer, norm_kwargs)
         self.seg_head = FCNHead(nclass, capacity, norm_layer, norm_kwargs)
Example 22: _FAModule
 def __init__(self,
              channels,
              in_channels,
              norm_layer=nn.BatchNorm,
              norm_kwargs=None):
     super(_FAModule, self).__init__()
     with self.name_scope():
         self.fsm = _FSModule(channels,
                              in_channels,
                              norm_layer=norm_layer,
                              norm_kwargs=norm_kwargs)
         self.offset = ConvModule2d(channels,
                                    1,
                                    norm_layer=norm_layer,
                                    norm_kwargs=norm_kwargs)
         self.align = cnn.ModulatedDeformableConvolution(
             channels, 3, 1, 1, num_deformable_group=8)
         self.relu = nn.Activation('relu')
Example 23: _LadderHead
 def __init__(self,
              nclass,
              decoder_capacity,
              norm_layer=nn.BatchNorm,
              norm_kwargs=None):
     super(_LadderHead, self).__init__()
     with self.name_scope():
         self.conv_c4 = ConvModule2d(decoder_capacity,
                                     1,
                                     norm_layer=norm_layer,
                                     norm_kwargs=norm_kwargs)
         self.fusion_c3 = LateralFusion(decoder_capacity, norm_layer,
                                        norm_kwargs)
         self.fusion_c2 = LateralFusion(decoder_capacity, norm_layer,
                                        norm_kwargs)
         self.fusion_c1 = LateralFusion(decoder_capacity, norm_layer,
                                        norm_kwargs)
         self.seg_head = FCNHead(nclass, decoder_capacity, norm_layer,
                                 norm_kwargs)
Example 24: _FPNBranch
 def __init__(self,
              capacity=256,
              norm_layer=nn.BatchNorm,
              norm_kwargs=None):
     super(_FPNBranch, self).__init__()
     with self.name_scope():
         self.conv = ConvModule2d(capacity,
                                  1,
                                  norm_layer=norm_layer,
                                  norm_kwargs=norm_kwargs)
         self.lateral16x = LateralFusion(capacity,
                                         norm_layer=norm_layer,
                                         norm_kwargs=norm_kwargs)
         self.lateral8x = LateralFusion(capacity,
                                        norm_layer=norm_layer,
                                        norm_kwargs=norm_kwargs)
         self.lateral4x = LateralFusion(capacity,
                                        norm_layer=norm_layer,
                                        norm_kwargs=norm_kwargs)
Example 25: _SwiftNetHead (variant)
 def __init__(self,
              nclass,
              capacity=128,
              norm_layer=nn.BatchNorm,
              norm_kwargs=None):
     super(_SwiftNetHead, self).__init__()
     with self.name_scope():
         self.conv1x1 = ConvModule2d(capacity,
                                     1,
                                     norm_layer=norm_layer,
                                     norm_kwargs=norm_kwargs)
         self.fusion_32x = _LateralFusion(capacity, norm_layer, norm_kwargs)
         self.fusion_16x = _LateralFusion(capacity, norm_layer, norm_kwargs)
         self.fusion_8x = _LateralFusion(capacity, norm_layer, norm_kwargs)
         self.final = _LateralFusion(capacity,
                                     norm_layer,
                                     norm_kwargs,
                                     is_final=True)
         self.seg_head = FCNHead(nclass, capacity, norm_layer, norm_kwargs)
Example 26: _AttentionHead
 def __init__(self,
              nclass,
              norm_layer=nn.BatchNorm,
              norm_kwargs=None,
              use_sigmoid=True):
     super(_AttentionHead, self).__init__()
     self.sigmoid = use_sigmoid
     with self.name_scope():
         self.seg_head = FCNHead(nclass,
                                 norm_layer=norm_layer,
                                 norm_kwargs=norm_kwargs)
         self.conv3x3 = ConvModule2d(512,
                                     3,
                                     1,
                                     1,
                                     norm_layer=norm_layer,
                                     norm_kwargs=norm_kwargs)
         if use_sigmoid:
             self.conv1x1 = nn.Conv2D(1, 1, in_channels=512)
         else:
             self.conv1x1 = nn.Conv2D(2, 1, in_channels=512)
Example 27: _FaPNHead
 def __init__(self,
              nclass,
              in_channels_group,
              capacity=256,
              norm_layer=nn.BatchNorm,
              norm_kwargs=None):
     super(_FaPNHead, self).__init__()
     with self.name_scope():
         self.conv = ConvModule2d(capacity,
                                  1,
                                  norm_layer=norm_layer,
                                  norm_kwargs=norm_kwargs)
         self.align_c3 = _FAModule(capacity, in_channels_group[2],
                                   norm_layer, norm_kwargs)
         self.align_c2 = _FAModule(capacity, in_channels_group[1],
                                   norm_layer, norm_kwargs)
         self.align_c1 = _FAModule(capacity, in_channels_group[0],
                                   norm_layer, norm_kwargs)
         self.seg = FCNHead(nclass,
                            norm_layer=norm_layer,
                            norm_kwargs=norm_kwargs)
Example 28: _EPRModule
    def __init__(self,
                 channels,
                 in_channels,
                 atrous_rates,
                 norm_layer=nn.BatchNorm,
                 norm_kwargs=None,
                 activation='prelu',
                 down_sample=False,
                 light=False):
        super(_EPRModule, self).__init__()
        stride = 2 if down_sample else 1
        with self.name_scope():
            self.pyramid = _MPUnit(channels,
                                   atrous_rates,
                                   in_channels,
                                   norm_layer,
                                   norm_kwargs,
                                   activation=activation,
                                   light=light)
            self.compact = ConvModule2d(channels,
                                        3,
                                        stride,
                                        1,
                                        in_channels=channels,
                                        norm_layer=norm_layer,
                                        norm_kwargs=norm_kwargs,
                                        activation=None)

            if (channels != in_channels) or down_sample:
                self.skip = nn.Conv2D(channels,
                                      kernel_size=1,
                                      strides=stride,
                                      use_bias=False,
                                      in_channels=in_channels)
                self.skip_bn = norm_layer(
                    **({} if norm_kwargs is None else norm_kwargs))
            else:
                self.skip = None

            self.act = Activation(activation)
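The optional skip branch marks this as a residual unit: pyramid then compact conv on the main path, a 1x1 + BN projection on the identity when the channel count or resolution changes, summed and activated. A sketch:

    def hybrid_forward(self, F, x):
        out = self.compact(self.pyramid(x))
        identity = x if self.skip is None else self.skip_bn(self.skip(x))
        return self.act(out + identity)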
Example 29: Xception
 def __init__(self,
              block,
              layers,
              channels,
              classes=1000,
              norm_layer=nn.BatchNorm,
              norm_kwargs=None,
              **kwargs):
     super(Xception, self).__init__()
     self.in_channels = 8
     self.norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
     with self.name_scope():
         self.conv1 = ConvModule2d(self.in_channels,
                                   3,
                                   2,
                                   1,
                                   use_bias=False,
                                   norm_layer=norm_layer,
                                   norm_kwargs=norm_kwargs,
                                   activation='relu')
         self.maxpool = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
         self.layer1 = self._make_layer(block,
                                        norm_layer,
                                        layers[0],
                                        channels[0],
                                        strides=2)
         self.layer2 = self._make_layer(block,
                                        norm_layer,
                                        layers[1],
                                        channels[1],
                                        strides=2)
         self.layer3 = self._make_layer(block,
                                        norm_layer,
                                        layers[2],
                                        channels[2],
                                        strides=2)
         self.avgpool = nn.GlobalAvgPool2D()
         self.flat = nn.Flatten()
         self.fc = nn.Dense(in_units=self.in_channels, units=classes)
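The classifier's forward pass is fully determined by the declared layers: stem conv, max-pool, three stages, global average pooling, flatten, dense. A sketch:

 def hybrid_forward(self, F, x):
     x = self.maxpool(self.conv1(x))
     x = self.layer3(self.layer2(self.layer1(x)))
     return self.fc(self.flat(self.avgpool(x)))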
Example 30: _BiSeNetHead
 def __init__(self, nclass, norm_layer=nn.BatchNorm, norm_kwargs=None):
     super(_BiSeNetHead, self).__init__()
     with self.name_scope():
         self.spatial_path = _SpatialPath(128,
                                          norm_layer=norm_layer,
                                          norm_kwargs=norm_kwargs)
         self.global_flow = GlobalFlow(128,
                                       norm_layer=norm_layer,
                                       norm_kwargs=norm_kwargs)
         self.refine_c4 = _ARModule(128, norm_layer, norm_kwargs)
         self.refine_c3 = _ARModule(128, norm_layer, norm_kwargs)
         self.proj = ConvModule2d(128,
                                  3,
                                  1,
                                  1,
                                  norm_layer=norm_layer,
                                  norm_kwargs=norm_kwargs)
         self.fusion = _FFModule(256, norm_layer, norm_kwargs, reduction=1)
         self.seg = FCNHead(nclass,
                            256,
                            norm_layer,
                            norm_kwargs,
                            drop_out=.0)