Example #1
File: afnb.py Project: zzzhoudj/ANN
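All of the excerpts on this page assume the imports used throughout the project: `torch`, `torch.nn as nn`, and the repo's own helpers (`ModuleHelper`, `PSPModule`, `BackboneSelector`, `AFNB`, `APNB`). None of the snippets runs standalone without the zzzhoudj/ANN sources on the path.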
    def __init__(self,
                 low_in_channels,
                 high_in_channels,
                 key_channels,
                 value_channels,
                 out_channels=None,
                 scale=1,
                 norm_type=None,
                 psp_size=(1, 3, 6, 8)):
        super(_SelfAttentionBlock, self).__init__()
        self.scale = scale
        self.in_channels = low_in_channels
        self.out_channels = out_channels
        self.key_channels = key_channels
        self.value_channels = value_channels
        if out_channels is None:
            self.out_channels = high_in_channels
        self.pool = nn.MaxPool2d(kernel_size=(scale, scale))
        self.f_key = nn.Sequential(
            nn.Conv2d(in_channels=self.in_channels,
                      out_channels=self.key_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0),
            ModuleHelper.BNReLU(self.key_channels, norm_type=norm_type),
        )
        self.f_query = nn.Sequential(
            nn.Conv2d(in_channels=high_in_channels,
                      out_channels=self.key_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0),
            ModuleHelper.BNReLU(self.key_channels, norm_type=norm_type),
        )
        self.f_value = nn.Conv2d(in_channels=self.in_channels,
                                 out_channels=self.value_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.W = nn.Conv2d(in_channels=self.value_channels,
                           out_channels=self.out_channels,
                           kernel_size=1,
                           stride=1,
                           padding=0)

        self.psp = PSPModule(psp_size)
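        # Zero-init (below): W emits zeros at the start of training, so the
        # attention context initially contributes nothing to the fused features.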
        nn.init.constant_(self.W.weight, 0)
        nn.init.constant_(self.W.bias, 0)
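The forward pass is not part of this excerpt, but the projections above feed the standard asymmetric non-local computation: queries come from the high-level map at full resolution, while keys and values are subsampled through the PSP module. A rough, self-contained shape sketch follows (plain `torch` only; the batch size, the 32x32 map, and the 256-channel widths are assumed for illustration, and the exact scaling in the repo's forward may differ):

import torch
import torch.nn.functional as F

b, c_key, c_val, h, w = 2, 256, 256, 32, 32
n = 1 + 3 * 3 + 6 * 6 + 8 * 8                      # psp_size=(1, 3, 6, 8) -> 110 sampled anchors

query = torch.randn(b, c_key, h * w)               # f_query(high_feats), flattened
key = torch.randn(b, c_key, n)                     # psp(f_key(low_feats))
value = torch.randn(b, n, c_val)                   # psp(f_value(low_feats)), transposed

sim = torch.bmm(query.transpose(1, 2), key)        # (b, h*w, n) affinities
sim = F.softmax((c_key ** -0.5) * sim, dim=-1)     # scaled softmax over the anchors
context = torch.bmm(sim, value)                    # (b, h*w, c_val)
context = context.transpose(1, 2).reshape(b, c_val, h, w)  # ready for self.W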
Example #2
    def __init__(self, configer):
        self.inplanes = 128
        super(DeepLabV3, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get('data', 'num_classes')
        self.backbone = BackboneSelector(configer).get_backbone()

        self.head = nn.Sequential(
            ASPPModule(2048, bn_type=self.configer.get('network', 'bn_type')),
            nn.Conv2d(512,
                      self.num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
        self.dsn = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(512,
                                bn_type=self.configer.get('network', 'bn_type')),
            nn.Dropout2d(0.1),
            nn.Conv2d(512,
                      self.num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
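In this excerpt `configer` is only touched through `get(section, key)` lookups, so a dictionary-backed stub is enough to construct the model. A hypothetical sketch (`DictConfiger` and the config values are made up here; `DeepLabV3` itself still needs the repo's `BackboneSelector`, `ASPPModule`, and `ModuleHelper`):

class DictConfiger:
    """Minimal stand-in exposing the get(section, key) calls used above."""

    def __init__(self, cfg):
        self.cfg = cfg

    def get(self, section, key):
        return self.cfg[section][key]

configer = DictConfiger({'data': {'num_classes': 19},          # e.g. Cityscapes
                         'network': {'bn_type': 'torchbn'}})   # assumed value
model = DeepLabV3(configer)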
Example #3
    def __init__(self, configer):
        super(PSPNet, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get('data', 'num_classes')
        self.backbone = BackboneSelector(configer).get_backbone()
        num_features = self.backbone.get_num_features()
        self.dsn = nn.Sequential(
            _ConvBatchNormReluBlock(num_features // 2,
                                    num_features // 4,
                                    3,
                                    1,
                                    bn_type=self.configer.get(
                                        'network', 'bn_type')),
            nn.Dropout2d(0.1),
            nn.Conv2d(num_features // 4, self.num_classes, 1, 1, 0))
        self.ppm = PPMBilinearDeepsup(fc_dim=num_features,
                                      bn_type=self.configer.get(
                                          'network', 'bn_type'))

        self.cls = nn.Sequential(
            nn.Conv2d(num_features + 4 * 512,
                      512,
                      kernel_size=3,
                      padding=1,
                      bias=False),
            ModuleHelper.BNReLU(512,
                                bn_type=self.configer.get('network', 'bn_type')),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, self.num_classes, kernel_size=1))
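The `num_features + 4 * 512` input width of `self.cls` is exactly what the pyramid pooling in Example #7 produces when its four branches are upsampled and concatenated with the backbone features. A quick check, assuming a ResNet-style `num_features = 2048`:

num_features = 2048                 # assumed backbone output width
cls_in = num_features + 4 * 512     # backbone map + one 512-ch branch per pool scale
assert cls_in == 4096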
Example #4
    def __init__(self, configer):
        super(asymmetric_non_local_network, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get('data', 'num_classes')
        self.backbone = BackboneSelector(configer).get_backbone()
        # low_in_channels, high_in_channels, out_channels, key_channels, value_channels, dropout
        self.fusion = AFNB(1024,
                           2048,
                           2048,
                           256,
                           256,
                           dropout=0.05,
                           sizes=([1]),
                           norm_type=self.configer.get('network', 'norm_type'))
        # extra added layers
        self.context = nn.Sequential(
            nn.Conv2d(2048, 512, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(512,
                                norm_type=self.configer.get('network', 'norm_type')),
            APNB(in_channels=512,
                 out_channels=512,
                 key_channels=256,
                 value_channels=256,
                 dropout=0.05,
                 sizes=([1]),
                 norm_type=self.configer.get('network', 'norm_type')))
        self.cls = nn.Conv2d(512,
                             self.num_classes,
                             kernel_size=1,
                             stride=1,
                             padding=0,
                             bias=True)
        self.dsn = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(512,
                                norm_type=self.configer.get('network', 'norm_type')),
            nn.Dropout2d(0.05),
            nn.Conv2d(512,
                      self.num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
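The forward method is not included in this excerpt; based on the channel widths above, the usual wiring would be roughly the following (comments only, since the real code lives in the repo):

# x3, x4 = self.backbone(img)       # stage outputs with 1024 / 2048 channels
# x = self.fusion(x3, x4)           # AFNB: inject low-level context into the 2048-ch map
# x = self.context(x)               # 3x3 conv + BNReLU + APNB, down to 512 channels
# out = self.cls(x)                 # per-pixel class logits
# aux = self.dsn(x3)                # auxiliary (deep supervision) head on the 1024-ch stage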
Example #5
    def __init__(self,
                 in_channels,
                 out_channels,
                 key_channels,
                 value_channels,
                 dropout,
                 sizes=([1]),
                 norm_type=None,
                 psp_size=(1, 3, 6, 8)):
        super(APNB, self).__init__()
        self.norm_type = norm_type
        self.psp_size = psp_size
        self.stages = nn.ModuleList([
            self._make_stage(in_channels, out_channels, key_channels,
                             value_channels, size) for size in sizes
        ])
        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(2 * in_channels, out_channels, kernel_size=1, padding=0),
            ModuleHelper.BNReLU(out_channels, norm_type=norm_type),
            nn.Dropout2d(dropout))
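The `2 * in_channels` input of `conv_bn_dropout` implies the attention output is concatenated with the original feature map before the 1x1 fusion, which is the usual APNB pattern. A plain-`torch` channel check with the `in_channels=512` used in Example #4 (shapes assumed for illustration):

import torch
import torch.nn as nn

feats = torch.randn(2, 512, 32, 32)                 # block input
context = torch.randn(2, 512, 32, 32)               # attention stage output (same width)
fuse = nn.Conv2d(2 * 512, 512, kernel_size=1, padding=0)
out = fuse(torch.cat([context, feats], dim=1))      # (2, 512, 32, 32)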
Example #6
    def __init__(self, features, inner_features=512, out_features=512, dilations=(12, 24, 36), norm_type=None):
        super(ASPPModule, self).__init__()

        self.conv1 = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
                                   nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1,
                                             bias=False),
                                   ModuleHelper.BNReLU(inner_features, norm_type=norm_type))
        self.conv2 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1, bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))
        self.conv3 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[0], dilation=dilations[0], bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))
        self.conv4 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[1], dilation=dilations[1], bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))
        self.conv5 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[2], dilation=dilations[2], bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))

        self.bottleneck = nn.Sequential(
            nn.Conv2d(inner_features * 5, out_features, kernel_size=1, padding=0, dilation=1, bias=False),
            ModuleHelper.BNReLU(out_features, norm_type=norm_type),
            nn.Dropout2d(0.1)
        )
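`conv1` pools the whole map down to 1x1, so its output has to be upsampled back before the five branches can be concatenated into the `inner_features * 5` bottleneck input. A self-contained shape sketch (plain `torch`; `features=2048` and the 33x33 map are assumed):

import torch
import torch.nn.functional as F

x = torch.randn(2, 2048, 33, 33)
pooled = torch.randn(2, 512, 1, 1)                   # conv1: image-level branch
pooled = F.interpolate(pooled, size=x.shape[2:], mode='bilinear', align_corners=True)
branches = [torch.randn(2, 512, 33, 33) for _ in range(4)]  # conv2..conv5 outputs
fused = torch.cat([pooled] + branches, dim=1)        # (2, 5 * 512, 33, 33)
assert fused.shape[1] == 512 * 5                     # matches the bottleneck's input width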
Example #7
    def __init__(self, fc_dim=4096, bn_type=None):
        super(PPMBilinearDeepsup, self).__init__()
        self.bn_type = bn_type
        pool_scales = (1, 2, 3, 6)
        self.ppm = []
        # assert bn_type == 'syncbn' or not self.training
        # Torch BN can't handle feature map size with 1x1.
        for scale in pool_scales:
            self.ppm.append(
                nn.Sequential(
                    nn.AdaptiveAvgPool2d(scale),
                    nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                    ModuleHelper.BNReLU(512, bn_type=bn_type)))

        self.ppm = nn.ModuleList(self.ppm)
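The forward pass (not shown) typically upsamples each pooled branch back to the input resolution and concatenates everything with the input itself, which is where Example #3's `num_features + 4 * 512` comes from. A rough sketch with stand-in tensors:

import torch
import torch.nn.functional as F

fc_dim = 2048                                        # assumed backbone width
x = torch.randn(2, fc_dim, 60, 60)
outs = [x]
for scale in (1, 2, 3, 6):
    branch = torch.randn(2, 512, scale, scale)       # stand-in for a ppm branch output
    outs.append(F.interpolate(branch, size=x.shape[2:],
                              mode='bilinear', align_corners=False))
ppm_out = torch.cat(outs, dim=1)                     # (2, fc_dim + 4 * 512, 60, 60)
assert ppm_out.shape[1] == fc_dim + 4 * 512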
Example #8
    def __init__(self,
                 inplanes,
                 outplanes,
                 kernel_size,
                 stride,
                 padding=1,
                 dilation=1,
                 bn_type=None):
        super(_ConvBatchNormReluBlock, self).__init__()
        self.conv = nn.Conv2d(in_channels=inplanes,
                              out_channels=outplanes,
                              kernel_size=kernel_size,
                              stride=stride,
                              padding=padding,
                              dilation=dilation,
                              bias=False)
        self.bn_relu = ModuleHelper.BNReLU(outplanes, bn_type=bn_type)
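With the default `padding=1`, a 3x3/stride-1 instantiation preserves spatial size, which is exactly how Example #3 uses it (`_ConvBatchNormReluBlock(num_features // 2, num_features // 4, 3, 1, ...)`). The second constructor in this example belongs to a different class, `SelfAttentionModuleV2`: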
    def __init__(self,
                 in_channels,
                 key_channels,
                 value_channels,
                 out_channels=None,
                 kernel_size=1,
                 dilation_list=None,
                 padding_list=None,
                 stride=1,
                 scale=1,
                 bn_type=None):
        super(SelfAttentionModuleV2, self).__init__()
        self.scale = scale
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.key_channels = key_channels
        self.value_channels = value_channels
        self.kernel_size = self._pair(kernel_size)
        self.padding_list = padding_list
        self.dilation_list = dilation_list
        self.stride = self._pair(stride)
        if out_channels is None:
            self.out_channels = in_channels
        self.pool = nn.MaxPool2d(kernel_size=(scale, scale))
        self.f_key = nn.Sequential(
            nn.Conv2d(in_channels=self.in_channels,
                      out_channels=self.key_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0),
            ModuleHelper.BNReLU(self.key_channels, bn_type=bn_type),
            nn.Conv2d(in_channels=self.key_channels,
                      out_channels=self.key_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0),
            ModuleHelper.BNReLU(self.key_channels, bn_type=bn_type),
        )
        self.f_query = nn.Sequential(
            nn.Conv2d(in_channels=self.in_channels,
                      out_channels=self.key_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0),
            ModuleHelper.BNReLU(self.key_channels, bn_type=bn_type),
            nn.Conv2d(in_channels=self.key_channels,
                      out_channels=self.key_channels,
                      kernel_size=1,
                      stride=1,
                      padding=0),
            ModuleHelper.BNReLU(self.key_channels, bn_type=bn_type),
        )
        # self.f_query = self.f_key
        self.f_value = nn.Conv2d(in_channels=self.in_channels,
                                 out_channels=self.value_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.W = nn.Conv2d(in_channels=self.value_channels,
                           out_channels=self.out_channels,
                           kernel_size=1,
                           stride=1,
                           padding=0)
        # Zero-init: W emits zeros at the start of training, so the attention
        # context initially contributes nothing downstream.
        nn.init.constant_(self.W.weight, 0)
        nn.init.constant_(self.W.bias, 0)
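Note that `kernel_size`, `stride`, `dilation_list`, and `padding_list` are stored here but never consumed by the 1x1 projections above; presumably the class's forward pass (not part of this excerpt) uses them.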