def __init__(self, in_channels, out_channels, num_classes, configer):
        """Non-local head (no-weight-decay variant).

        Args:
            in_channels: channels of the incoming backbone feature map.
            out_channels: channels produced by the bottleneck fusion conv.
            num_classes: classes predicted by the final 1x1 classifier.
            configer: project config object; read for 'network.norm_type'
                and the 'nonlocal' hyper-parameter section.
        """
        super(NLModule_nowd, self).__init__()
        inter_channels = in_channels // 4
        self.configer = configer
        norm_type = self.configer.get('network', 'norm_type')

        # Reduce the input to inter_channels before the non-local block.
        self.conva = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            ModuleHelper.BNReLU(inter_channels, norm_type=norm_type))

        self.ctb = NonLocal2d_nowd(
            inter_channels, inter_channels // 2,
            downsample=self.configer.get('nonlocal', 'downsample'),
            whiten_type=self.configer.get('nonlocal', 'whiten_type'),
            weight_init_scale=self.configer.get('nonlocal', 'weight_init_scale'),
            with_gc=self.configer.get('nonlocal', 'with_gc'),
            with_nl=self.configer.get('nonlocal', 'with_nl'),
            nowd=self.configer.get('nonlocal', 'nowd'),
            use_out=self.configer.get('nonlocal', 'use_out'),
            out_bn=self.configer.get('nonlocal', 'out_bn'))

        self.convb = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
            ModuleHelper.BNReLU(inter_channels, norm_type=norm_type))

        # Fuse the original input with the context features, then classify.
        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels + inter_channels, out_channels,
                      kernel_size=3, padding=1, dilation=1, bias=False),
            ModuleHelper.BNReLU(out_channels, norm_type=norm_type),
            nn.Dropout2d(0.1),
            # BUGFIX: was a hard-coded 512; the classifier input has
            # out_channels maps (set by the conv above). Identical behavior
            # whenever out_channels == 512.
            nn.Conv2d(out_channels, num_classes, kernel_size=1, stride=1,
                      padding=0, bias=True))
# 示例#2 (Example #2)
# 0
    def __init__(self, low_in_channels, high_in_channels, key_channels,
                 value_channels, out_channels=None, scale=1, norm_type=None,
                 psp_size=(1, 3, 6, 8)):
        """Asymmetric self-attention block: queries come from the high-level
        map, keys/values from the low-level map (PSP-compressed).

        Args:
            low_in_channels: channels of the key/value (low-level) input.
            high_in_channels: channels of the query (high-level) input; also
                the default output width when out_channels is None.
            key_channels: projection width shared by keys and queries.
            value_channels: projection width for values.
            out_channels: output channels of the final 1x1 conv ``W``
                (defaults to high_in_channels).
            scale: max-pool downsampling factor.
            norm_type: normalization flavor for ModuleHelper.BNReLU.
            psp_size: pyramid bin sizes used by PSPModule.
        """
        super(_SelfAttentionBlock, self).__init__()
        self.scale = scale
        self.in_channels = low_in_channels
        self.out_channels = out_channels
        self.key_channels = key_channels
        self.value_channels = value_channels
        if out_channels is None:  # FIX: identity test, not `== None` (PEP 8)
            self.out_channels = high_in_channels
        self.pool = nn.MaxPool2d(kernel_size=(scale, scale))
        self.f_key = nn.Sequential(
            nn.Conv2d(in_channels=self.in_channels,
                      out_channels=self.key_channels,
                      kernel_size=1, stride=1, padding=0),
            ModuleHelper.BNReLU(self.key_channels, norm_type=norm_type),
        )
        self.f_query = nn.Sequential(
            nn.Conv2d(in_channels=high_in_channels,
                      out_channels=self.key_channels,
                      kernel_size=1, stride=1, padding=0),
            ModuleHelper.BNReLU(self.key_channels, norm_type=norm_type),
        )
        self.f_value = nn.Conv2d(in_channels=self.in_channels,
                                 out_channels=self.value_channels,
                                 kernel_size=1, stride=1, padding=0)
        self.W = nn.Conv2d(in_channels=self.value_channels,
                           out_channels=self.out_channels,
                           kernel_size=1, stride=1, padding=0)

        self.psp = PSPModule(psp_size)
        # Zero-init the output projection so the attention branch initially
        # contributes nothing and fades in during training.
        nn.init.constant_(self.W.weight, 0)
        nn.init.constant_(self.W.bias, 0)
    def __init__(self, in_channels, out_channels, num_classes, configer):
        """Global Context Block head.

        Args:
            in_channels: channels of the incoming backbone feature map.
            out_channels: channels produced by the bottleneck fusion conv.
            num_classes: classes predicted by the final 1x1 classifier.
            configer: project config object (read for 'network.norm_type').
        """
        super(GCBModule, self).__init__()
        inter_channels = in_channels // 4
        self.configer = configer
        norm_type = self.configer.get('network', 'norm_type')

        # Reduce the input to inter_channels before the context block.
        self.conva = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            ModuleHelper.BNReLU(inter_channels, norm_type=norm_type))
        self.ctb = ContextBlock(inter_channels, ratio=1. / 4)
        self.convb = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1,
                      bias=False),
            ModuleHelper.BNReLU(inter_channels, norm_type=norm_type))

        # Fuse the original input with the context features, then classify.
        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels + inter_channels, out_channels,
                      kernel_size=3, padding=1, dilation=1, bias=False),
            ModuleHelper.BNReLU(out_channels, norm_type=norm_type),
            nn.Dropout2d(0.1),
            # BUGFIX: the classifier input has out_channels maps (set by the
            # conv above), so use out_channels instead of a hard-coded 512.
            # Identical behavior whenever out_channels == 512.
            nn.Conv2d(out_channels, num_classes, kernel_size=1, stride=1,
                      padding=0, bias=True))
    def __init__(self, in_channels, out_channels, num_classes, configer):
        """Non-local (BN variant) head.

        Args:
            in_channels: channels of the incoming backbone feature map.
            out_channels: channels produced by the bottleneck fusion conv.
            num_classes: classes predicted by the final 1x1 classifier.
            configer: project config object; read for 'network.norm_type'
                and the 'nonlocal' hyper-parameter section.
        """
        super(NLModule, self).__init__()
        inter_channels = in_channels // 2
        self.configer = configer
        # Non-local hyper-parameters pulled from the 'nonlocal' section.
        self.down = self.configer.get('nonlocal', 'downsample')
        self.whiten_type = self.configer.get('nonlocal', 'whiten_type')
        self.temp = self.configer.get('nonlocal', 'temp')
        self.with_gc = self.configer.get('nonlocal', 'with_gc')
        self.with_unary = self.configer.get('nonlocal',
                                            'with_unary',
                                            default=False)
        self.use_out = self.configer.get('nonlocal', 'use_out')
        self.out_bn = self.configer.get('nonlocal', 'out_bn')
        norm_type = self.configer.get('network', 'norm_type')

        # Reduce the input to inter_channels before the non-local block.
        self.conva = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            ModuleHelper.BNReLU(inter_channels, norm_type=norm_type))

        self.ctb = NonLocal2d_bn(inter_channels,
                                 inter_channels // 2,
                                 downsample=self.down,
                                 whiten_type=self.whiten_type,
                                 temperature=self.temp,
                                 with_gc=self.with_gc,
                                 with_unary=self.with_unary,
                                 use_out=self.use_out,
                                 out_bn=self.out_bn)

        self.convb = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1,
                      bias=False),
            ModuleHelper.BNReLU(inter_channels, norm_type=norm_type))

        # Fuse the original input with the context features, then classify.
        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels + inter_channels, out_channels,
                      kernel_size=3, padding=1, dilation=1, bias=False),
            ModuleHelper.BNReLU(out_channels, norm_type=norm_type),
            nn.Dropout2d(0.1),
            # BUGFIX: was a hard-coded 512; the classifier input has
            # out_channels maps (set by the conv above). Identical behavior
            # whenever out_channels == 512.
            nn.Conv2d(out_channels, num_classes, kernel_size=1, stride=1,
                      padding=0, bias=True))
# 示例#5 (Example #5)
# 0
    def __init__(self, configer):
        """Assemble PSPNet: backbone + pyramid pooling + classifier heads."""
        super(PSPNet, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get('data', 'num_classes')
        self.backbone = BackboneSelector(configer).get_backbone()
        feat_dim = self.backbone.get_num_features()

        # Auxiliary (deep-supervision) head on the mid-level feature map.
        self.dsn = nn.Sequential(
            _ConvBatchNormReluBlock(
                feat_dim // 2, feat_dim // 4, 3, 1,
                norm_type=self.configer.get('network', 'norm_type')),
            nn.Dropout2d(0.1),
            nn.Conv2d(feat_dim // 4, self.num_classes, 1, 1, 0))

        # Pyramid pooling over the final backbone features.
        self.ppm = PPMBilinearDeepsup(
            fc_dim=feat_dim,
            norm_type=self.configer.get('network', 'norm_type'))

        # Main classifier: backbone features concatenated with the four
        # 512-channel pyramid branches, fused by a 3x3 conv.
        self.cls = nn.Sequential(
            nn.Conv2d(feat_dim + 4 * 512, 512, kernel_size=3, padding=1,
                      bias=False),
            ModuleHelper.BNReLU(
                512, norm_type=self.configer.get('network', 'norm_type')),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, self.num_classes, kernel_size=1))
        self.valid_loss_dict = configer.get(
            'loss', 'loss_weights', configer.get('loss.loss_type'))
# 示例#6 (Example #6)
# 0
    def __init__(self, configer):
        """Assemble DeepLabV3: backbone + ASPP head + auxiliary DSN head."""
        self.inplanes = 128
        super(DeepLabV3, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get('data', 'num_classes')
        self.backbone = BackboneSelector(configer).get_backbone()

        # Main head: ASPP (512-channel output) then a 1x1 classifier.
        self.head = nn.Sequential(
            ASPPModule(self.backbone.get_num_features(),
                       norm_type=self.configer.get('network', 'norm_type')),
            nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1,
                      padding=0, bias=True))
        # Auxiliary (deep-supervision) head fed a 1024-channel feature map.
        self.dsn = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(
                512, norm_type=self.configer.get('network', 'norm_type')),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1,
                      padding=0, bias=True))
        self.valid_loss_dict = configer.get('loss', 'loss_weights',
                                            configer.get('loss.loss_type'))
# 示例#7 (Example #7)
# 0
 def __init__(self, configer):
     """ANN: asymmetric non-local network (AFNB fusion + APNB context)."""
     super(asymmetric_non_local_network, self).__init__()
     self.configer = configer
     self.num_classes = self.configer.get('data', 'num_classes')
     self.backbone = BackboneSelector(configer).get_backbone()
     # Fuse the 1024-channel and 2048-channel backbone feature maps.
     # AFNB args: low_in, high_in, out, key, value channels.
     self.fusion = AFNB(1024, 2048, 2048, 256, 256,
                        dropout=0.05,
                        sizes=([1]),
                        norm_type=self.configer.get('network', 'norm_type'))
     # Extra context head: 3x3 reduction to 512 then an APNB block.
     self.context = nn.Sequential(
         nn.Conv2d(2048, 512, kernel_size=3, stride=1, padding=1),
         ModuleHelper.BNReLU(
             512, norm_type=self.configer.get('network', 'norm_type')),
         APNB(in_channels=512, out_channels=512, key_channels=256,
              value_channels=256, dropout=0.05, sizes=([1]),
              norm_type=self.configer.get('network', 'norm_type')))
     # Main 1x1 classifier over the 512-channel context features.
     self.cls = nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1,
                          padding=0, bias=True)
     # Auxiliary (deep-supervision) head on the 1024-channel feature map.
     self.dsn = nn.Sequential(
         nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
         ModuleHelper.BNReLU(
             512, norm_type=self.configer.get('network', 'norm_type')),
         nn.Dropout2d(0.05),
         nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1,
                   padding=0, bias=True))
     self.valid_loss_dict = configer.get('loss', 'loss_weights',
                                         configer.get('loss.loss_type'))
# 示例#8 (Example #8)
# 0
    def __init__(self, in_channels, out_channels, num_classes, configer):
        """Unary-attention head.

        Args:
            in_channels: channels of the incoming backbone feature map.
            out_channels: channels produced by the bottleneck fusion conv.
            num_classes: classes predicted by the final 1x1 classifier.
            configer: project config object; read for 'network.norm_type'
                and the 'unary' hyper-parameter section.
        """
        super(UNModule, self).__init__()
        inter_channels = in_channels // 4
        self.configer = configer
        # Unary-block hyper-parameters from the 'unary' config section.
        self.down = self.configer.get('unary', 'downsample')
        self.use_out = self.configer.get('unary', 'use_out')
        self.out_bn = self.configer.get('unary', 'out_bn')
        norm_type = self.configer.get('network', 'norm_type')
        # Reduce the input to inter_channels before the unary block.
        self.conva = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            ModuleHelper.BNReLU(inter_channels, norm_type=norm_type))

        self.ctb = Unary2d(inter_channels,
                           inter_channels // 2,
                           downsample=self.down,
                           use_out=self.use_out,
                           out_bn=self.out_bn)

        self.convb = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1,
                      bias=False),
            ModuleHelper.BNReLU(inter_channels, norm_type=norm_type))

        # Fuse the original input with the context features, then classify.
        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels + inter_channels, out_channels,
                      kernel_size=3, padding=1, dilation=1, bias=False),
            ModuleHelper.BNReLU(out_channels, norm_type=norm_type),
            nn.Dropout2d(0.1),
            # BUGFIX: was a hard-coded 512; the classifier input has
            # out_channels maps (set by the conv above). Identical behavior
            # whenever out_channels == 512.
            nn.Conv2d(out_channels, num_classes, kernel_size=1, stride=1,
                      padding=0, bias=True))
# 示例#9 (Example #9)
# 0
    def __init__(self, features, inner_features=512, out_features=512,
                 dilations=(12, 24, 36), norm_type=None):
        """ASPP: image-pool branch, 1x1 branch, and three dilated 3x3
        branches, concatenated and fused by a 1x1 bottleneck + dropout."""
        super(ASPPModule, self).__init__()

        # Branch 1: global average pooling followed by a 1x1 projection.
        self.conv1 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Conv2d(features, inner_features, kernel_size=1, padding=0,
                      dilation=1, bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))
        # Branch 2: plain 1x1 projection.
        self.conv2 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=1, padding=0,
                      dilation=1, bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))
        # Branches 3-5: 3x3 atrous convs at the configured dilation rates
        # (padding == dilation keeps the spatial size unchanged).
        self.conv3 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=3,
                      padding=dilations[0], dilation=dilations[0],
                      bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))
        self.conv4 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=3,
                      padding=dilations[1], dilation=dilations[1],
                      bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))
        self.conv5 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=3,
                      padding=dilations[2], dilation=dilations[2],
                      bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))

        # Fuse the five concatenated branches back to out_features.
        self.bottleneck = nn.Sequential(
            nn.Conv2d(inner_features * 5, out_features, kernel_size=1,
                      padding=0, dilation=1, bias=False),
            ModuleHelper.BNReLU(out_features, norm_type=norm_type),
            nn.Dropout2d(0.1))
# 示例#10 (Example #10)
# 0
    def __init__(self, fc_dim=4096, norm_type=None):
        """Pyramid pooling: one branch per scale (1/2/3/6), each an adaptive
        average pool followed by a 1x1 conv to 512 channels + BN/ReLU."""
        super(PPMBilinearDeepsup, self).__init__()
        self.norm_type = norm_type
        pool_scales = (1, 2, 3, 6)
        # NOTE (kept from original): Torch BN can't handle a 1x1 feature map;
        # the original also suggested norm_type == 'syncbn' when training.
        self.ppm = nn.ModuleList([
            nn.Sequential(
                nn.AdaptiveAvgPool2d(scale),
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                ModuleHelper.BNReLU(512, norm_type=norm_type))
            for scale in pool_scales
        ])
# 示例#11 (Example #11)
# 0
 def __init__(self,
              inplanes,
              outplanes,
              kernel_size,
              stride,
              padding=1,
              dilation=1,
              norm_type=None):
     """Conv2d (no bias) followed by a BN + ReLU pair from ModuleHelper."""
     super(_ConvBatchNormReluBlock, self).__init__()
     # Bias is omitted because the following norm layer absorbs it.
     self.conv = nn.Conv2d(inplanes, outplanes, kernel_size, stride,
                           padding=padding, dilation=dilation, bias=False)
     self.bn_relu = ModuleHelper.BNReLU(outplanes, norm_type=norm_type)
# 示例#12 (Example #12)
# 0
 def __init__(self,
              in_channels,
              out_channels,
              key_channels,
              value_channels,
              dropout,
              sizes=([1]),
              norm_type=None,
              psp_size=(1, 3, 6, 8)):
     """Asymmetric Pyramid Non-local Block head.

     Args:
         in_channels: channels of the input feature map.
         out_channels: channels of the fused output.
         key_channels: key/query projection width for each stage.
         value_channels: value projection width for each stage.
         dropout: dropout rate applied after the fusion conv.
         sizes: one attention stage is built per entry.
             NOTE(review): mutable list default kept for interface
             compatibility; it is only iterated here, never mutated.
         norm_type: normalization flavor for ModuleHelper.BNReLU.
         psp_size: pyramid bin sizes, stored for the stages to use.
     """
     super(APNB, self).__init__()
     self.norm_type = norm_type
     self.psp_size = psp_size
     # One self-attention stage per requested size. (Removed a redundant
     # `self.stages = []` that was immediately overwritten by this.)
     self.stages = nn.ModuleList([
         self._make_stage(in_channels, out_channels, key_channels,
                          value_channels, size) for size in sizes
     ])
     # Fuse the attention output with the input (channel concat -> 1x1 conv).
     self.conv_bn_dropout = nn.Sequential(
         nn.Conv2d(2 * in_channels, out_channels, kernel_size=1, padding=0),
         ModuleHelper.BNReLU(out_channels, norm_type=norm_type),
         nn.Dropout2d(dropout))