Ejemplo n.º 1
0
 def __init__(self, configer):
     """Asymmetric non-local network: backbone + AFNB fusion + APNB context head.

     Channel widths (1024/2048) assume a deep ResNet backbone — TODO confirm
     against the configured 'network.backbone'.
     """
     super(asymmetric_non_local_network, self).__init__()
     self.configer = configer
     self.num_classes = self.configer.get('data', 'num_classes')
     # Backbone chosen and (optionally) pre-loaded from the configuration.
     self.backbone = ModuleHelper.get_backbone(
         backbone=self.configer.get('network.backbone'),
         pretrained=self.configer.get('network.pretrained'))
     # AFNB args: low_in, high_in, out, key, value channels, then dropout.
     self.fusion = AFNB(
         1024, 2048, 2048, 256, 256,
         dropout=0.05,
         sizes=[1],
         norm_type=self.configer.get('network', 'norm_type'))
     # Extra context head: 3x3 conv down to 512 channels, then an APNB attention block.
     self.context = nn.Sequential(
         nn.Conv2d(2048, 512, kernel_size=3, stride=1, padding=1),
         ModuleHelper.BNReLU(512, norm_type=self.configer.get('network', 'norm_type')),
         APNB(in_channels=512, out_channels=512, key_channels=256, value_channels=256,
              dropout=0.05, sizes=[1],
              norm_type=self.configer.get('network', 'norm_type')))
     # Final 1x1 classifier over the 512-channel context features.
     self.cls = nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)
     # Auxiliary (deep-supervision) branch fed from 1024-channel features.
     self.dsn = nn.Sequential(
         nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
         ModuleHelper.BNReLU(512, norm_type=self.configer.get('network', 'norm_type')),
         nn.Dropout2d(0.05),
         nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True))
     self.valid_loss_dict = configer.get('loss', 'loss_weights', configer.get('loss.loss_type'))
Ejemplo n.º 2
0
    def __init__(self, low_in_channels, high_in_channels, key_channels, value_channels,
                 out_channels=None, scale=1, norm_type=None, psp_size=(1, 3, 6, 8)):
        """Asymmetric self-attention block.

        Queries are computed from the high-level features; keys and values from
        the low-level features, which are pyramid-pooled (``self.psp``) to
        shrink the attention matrix.

        Args:
            low_in_channels: channels of the low-level (key/value) input.
            high_in_channels: channels of the high-level (query) input.
            key_channels / value_channels: projection widths.
            out_channels: output width; defaults to ``high_in_channels``.
            scale: max-pool factor applied before attention (1 = no-op pool).
            norm_type: forwarded to ModuleHelper.BNReLU.
            psp_size: pooling sizes handed to PSPModule.
        """
        super(_SelfAttentionBlock, self).__init__()
        self.scale = scale
        self.in_channels = low_in_channels
        # Fix: identity check with `is None` (was `== None`); default the
        # output width to the high-level input width.
        self.out_channels = high_in_channels if out_channels is None else out_channels
        self.key_channels = key_channels
        self.value_channels = value_channels
        self.pool = nn.MaxPool2d(kernel_size=(scale, scale))
        # Keys from the low-level features.
        self.f_key = nn.Sequential(
            nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels,
                      kernel_size=1, stride=1, padding=0),
            ModuleHelper.BNReLU(self.key_channels, norm_type=norm_type),
        )
        # Queries from the high-level features.
        self.f_query = nn.Sequential(
            nn.Conv2d(in_channels=high_in_channels, out_channels=self.key_channels,
                      kernel_size=1, stride=1, padding=0),
            ModuleHelper.BNReLU(self.key_channels, norm_type=norm_type),
        )
        self.f_value = nn.Conv2d(in_channels=self.in_channels, out_channels=self.value_channels,
                                 kernel_size=1, stride=1, padding=0)
        # Output projection; zero-initialized below.
        self.W = nn.Conv2d(in_channels=self.value_channels, out_channels=self.out_channels,
                           kernel_size=1, stride=1, padding=0)

        # NOTE(review): this PSPModule takes the sizes tuple as its first
        # argument — a different PSPModule than the feature-map one elsewhere
        # in the project; confirm the intended class.
        self.psp = PSPModule(psp_size)
        nn.init.constant_(self.W.weight, 0)
        nn.init.constant_(self.W.bias, 0)
Ejemplo n.º 3
0
    def __init__(self, configer):
        """SFNet-style segmentation net: ResNet backbone, FPN align head, DSN branch."""
        super(ResSFNet, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get('data', 'num_classes')
        base = ModuleHelper.get_backbone(
            backbone=self.configer.get('network.backbone'),
            pretrained=self.configer.get('network.pretrained'))
        # Stage 1 bundles the stem (conv1-3 + BN/ReLU + maxpool) and layer1;
        # stages 2-4 are the remaining ResNet layers.
        self.stage1 = nn.Sequential(base.conv1, base.bn1, base.relu1,
                                    base.conv2, base.bn2, base.relu2,
                                    base.conv3, base.bn3, base.relu3,
                                    base.maxpool, base.layer1)
        self.stage2 = base.layer2
        self.stage3 = base.layer3
        self.stage4 = base.layer4
        # resnet18 tops out at 512 channels; the deeper variants at 2048.
        num_features = 512 if 'resnet18' in self.configer.get(
            'network.backbone') else 2048
        fpn_dim = max(num_features // 8, 128)
        self.head = AlignHead(num_features, fpn_dim=fpn_dim)
        # Deep-supervision branch from the stage-3 (num_features // 2 channel) features.
        self.dsn = nn.Sequential(
            nn.Conv2d(num_features // 2, max(num_features // 4, 256),
                      kernel_size=3, stride=1, padding=1),
            ModuleHelper.BNReLU(max(num_features // 4, 256), norm_type="batchnorm"),
            nn.Dropout2d(0.1),
            nn.Conv2d(max(num_features // 4, 256), self.num_classes,
                      kernel_size=1, stride=1, padding=0, bias=True))
        # Final classifier over the concatenation of 4 fpn_dim-wide maps.
        self.conv_last = nn.Sequential(
            conv3x3_bn_relu(4 * fpn_dim, fpn_dim, 1),
            nn.Conv2d(fpn_dim, self.num_classes, kernel_size=1))
        # One auxiliary classifier per FPN level.
        # Fix: `for i in range(len([2, 4, 8]))` with an unused index was a
        # roundabout spelling of three iterations.
        self.fpn_dsn = nn.ModuleList()
        for _ in range(3):
            self.fpn_dsn.append(
                nn.Sequential(
                    nn.Conv2d(fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1),
                    ModuleHelper.BNReLU(fpn_dim, norm_type="batchnorm"),
                    nn.Dropout2d(0.1),
                    nn.Conv2d(fpn_dim, self.num_classes,
                              kernel_size=1, stride=1, padding=0, bias=True)))

        self.valid_loss_dict = configer.get('loss', 'loss_weights',
                                            configer.get('loss.loss_type'))
Ejemplo n.º 4
0
def conv3x3_bn_relu(in_planes, out_planes, stride=1, norm_type="batchnorm"):
    """Return a 3x3 conv (padding 1, no bias) followed by BN + ReLU as a Sequential."""
    # Bias is omitted because the following batch norm supplies its own shift.
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=False)
    return nn.Sequential(conv, ModuleHelper.BNReLU(out_planes, norm_type=norm_type))
Ejemplo n.º 5
0
    def __init__(self, features, inner_features=512, out_features=512, dilations=(12, 24, 36), norm_type=None):
        """ASPP head: image-pool branch, 1x1 branch, three dilated 3x3 branches, bottleneck."""
        super(ASPPModule, self).__init__()

        def _dilated_branch(rate):
            # 3x3 conv whose padding equals its dilation, preserving spatial size.
            return nn.Sequential(
                nn.Conv2d(features, inner_features, kernel_size=3,
                          padding=rate, dilation=rate, bias=False),
                ModuleHelper.BNReLU(inner_features, norm_type=norm_type))

        # Branch 1: global average pooling followed by a 1x1 projection.
        self.conv1 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Conv2d(features, inner_features, kernel_size=1,
                      padding=0, dilation=1, bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))
        # Branch 2: plain 1x1 projection.
        self.conv2 = nn.Sequential(
            nn.Conv2d(features, inner_features, kernel_size=1,
                      padding=0, dilation=1, bias=False),
            ModuleHelper.BNReLU(inner_features, norm_type=norm_type))
        # Branches 3-5: dilated 3x3 convs at the configured rates.
        self.conv3 = _dilated_branch(dilations[0])
        self.conv4 = _dilated_branch(dilations[1])
        self.conv5 = _dilated_branch(dilations[2])

        # Fuse all five branches and project down to out_features.
        self.bottleneck = nn.Sequential(
            nn.Conv2d(inner_features * 5, out_features, kernel_size=1,
                      padding=0, dilation=1, bias=False),
            ModuleHelper.BNReLU(out_features, norm_type=norm_type),
            nn.Dropout2d(0.1))
Ejemplo n.º 6
0
    def __init__(self, fc_dim=4096, norm_type=None):
        """Pyramid pooling module: one pool->1x1 conv->BNReLU branch per scale."""
        super(PPMBilinearDeepsup, self).__init__()
        self.norm_type = norm_type
        pool_scales = (1, 2, 3, 6)
        # assert norm_type == 'syncbn' or not self.training
        # Torch BN can't handle feature map size with 1x1.
        self.ppm = nn.ModuleList([
            nn.Sequential(
                nn.AdaptiveAvgPool2d(scale),
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                ModuleHelper.BNReLU(512, norm_type=norm_type))
            for scale in pool_scales
        ])
Ejemplo n.º 7
0
 def __init__(self,
              inplanes,
              outplanes,
              kernel_size,
              stride,
              padding=1,
              dilation=1,
              norm_type=None):
     """Conv2d (no bias) followed by a BN + ReLU pair."""
     super(_ConvBatchNormReluBlock, self).__init__()
     # Bias is omitted because the batch norm that follows has its own shift.
     self.conv = nn.Conv2d(
         in_channels=inplanes, out_channels=outplanes,
         kernel_size=kernel_size, stride=stride,
         padding=padding, dilation=dilation, bias=False)
     self.bn_relu = ModuleHelper.BNReLU(outplanes, norm_type=norm_type)
Ejemplo n.º 8
0
    def __init__(self,
                 features,
                 out_features=512,
                 sizes=(1, 2, 3, 6),
                 norm_type="batchnorm"):
        """Pyramid Scene Parsing module: one pooled stage per size plus a fusion bottleneck.

        Args:
            features: channels of the incoming feature map.
            out_features: channels of each stage and of the fused output.
            sizes: pooling grid sizes, one stage per entry.
            norm_type: forwarded to ModuleHelper.BNReLU.
        """
        super(PSPModule, self).__init__()

        # Fix: dropped a dead `self.stages = []` that was immediately overwritten.
        self.stages = nn.ModuleList([
            self._make_stage(features, out_features, size, norm_type)
            for size in sizes
        ])
        # Fuse the original features with every pooled stage, then dropout.
        self.bottleneck = nn.Sequential(
            nn.Conv2d(features + len(sizes) * out_features,
                      out_features,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False),
            ModuleHelper.BNReLU(out_features, norm_type=norm_type),
            nn.Dropout2d(0.1))
Ejemplo n.º 9
0
    def __init__(self, inplanes, norm_type="batchnorm", fpn_dim=256):
        """FPN-style head: PSP on the deepest stage, lateral 1x1 convs, aligned top-down fusion."""
        super(AlignHead, self).__init__()
        # PSP pooling on the deepest feature map, projected down to fpn_dim.
        self.ppm = PSPModule(inplanes, norm_type=norm_type, out_features=fpn_dim)

        # Lateral 1x1 projections for every stage except the deepest
        # (the deepest is handled by the PSP module above).
        lateral_channels = [inplanes // 8, inplanes // 4, inplanes // 2, inplanes]
        self.fpn_in = nn.ModuleList()
        for channels in lateral_channels[:-1]:
            self.fpn_in.append(nn.Sequential(
                nn.Conv2d(channels, fpn_dim, 1),
                ModuleHelper.BNReLU(fpn_dim, norm_type=norm_type),
            ))

        # Per-level smoothing conv plus a flow-alignment module for the
        # top-down pathway, one pair per lateral level.
        self.fpn_out = nn.ModuleList()
        self.fpn_out_align = nn.ModuleList()
        for _ in lateral_channels[:-1]:
            self.fpn_out.append(
                nn.Sequential(conv3x3_bn_relu(fpn_dim, fpn_dim, 1)))
            self.fpn_out_align.append(
                AlignModule(inplane=fpn_dim, outplane=fpn_dim // 2))
Ejemplo n.º 10
0
 def __init__(self,
              in_channels,
              out_channels,
              key_channels,
              value_channels,
              dropout,
              sizes=([1]),
              norm_type=None,
              psp_size=(1, 3, 6, 8)):
     """Asymmetric Pyramid Non-local Block: one attention stage per entry in ``sizes``.

     NOTE(review): ``sizes=([1])`` is a mutable default argument; it is never
     mutated here, so it is kept unchanged for interface compatibility.
     """
     super(APNB, self).__init__()
     self.norm_type = norm_type
     self.psp_size = psp_size
     # Fix: dropped a dead `self.stages = []` that was immediately overwritten.
     self.stages = nn.ModuleList([
         self._make_stage(in_channels, out_channels, key_channels,
                          value_channels, size) for size in sizes
     ])
     # Input concatenated with the attention output (2 * in_channels)
     # is projected to out_channels, then dropout is applied.
     self.conv_bn_dropout = nn.Sequential(
         nn.Conv2d(2 * in_channels, out_channels, kernel_size=1, padding=0),
         ModuleHelper.BNReLU(out_channels, norm_type=norm_type),
         nn.Dropout2d(dropout))
Ejemplo n.º 11
0
    def __init__(self, configer):
        """PSPNet: ResNet backbone, pyramid pooling, classifier, and a DSN branch."""
        super(PSPNet, self).__init__()
        self.configer = configer
        self.num_classes = self.configer.get('data', 'num_classes')
        backbone = ModuleHelper.get_backbone(
            backbone=self.configer.get('network.backbone'),
            pretrained=self.configer.get('network.pretrained'))
        # Stage 1 bundles the stem plus layers 1-3; stage 2 is layer 4 alone.
        self.stage1 = nn.Sequential(
            backbone.conv1, backbone.bn1, backbone.relu1,
            backbone.conv2, backbone.bn2, backbone.relu2,
            backbone.conv3, backbone.bn3, backbone.relu3,
            backbone.maxpool, backbone.layer1, backbone.layer2, backbone.layer3)
        self.stage2 = backbone.layer4
        # resnet18 tops out at 512 channels; the deeper variants at 2048.
        num_features = 512 if 'resnet18' in self.configer.get('network.backbone') else 2048
        # Deep-supervision head on the layer-3 (num_features // 2 channel) features.
        self.dsn = nn.Sequential(
            _ConvBatchNormReluBlock(
                num_features // 2, num_features // 4, 3, 1,
                norm_type=self.configer.get('network', 'norm_type')),
            nn.Dropout2d(0.1),
            nn.Conv2d(num_features // 4, self.num_classes, 1, 1, 0))
        self.ppm = PPMBilinearDeepsup(
            fc_dim=num_features,
            norm_type=self.configer.get('network', 'norm_type'))

        # Classifier over backbone features concatenated with the four
        # 512-channel PPM branches.
        self.cls = nn.Sequential(
            nn.Conv2d(num_features + 4 * 512, 512,
                      kernel_size=3, padding=1, bias=False),
            ModuleHelper.BNReLU(512, norm_type=self.configer.get('network', 'norm_type')),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, self.num_classes, kernel_size=1))
        self.valid_loss_dict = configer.get('loss', 'loss_weights',
                                            configer.get('loss.loss_type'))
Ejemplo n.º 12
0
 def __init__(self, configer):
     """DeepLabV3: ResNet backbone, ASPP context head, and an auxiliary DSN classifier."""
     # NOTE(review): set before super().__init__(); appears unused here —
     # presumably read elsewhere. Kept in place.
     self.inplanes = 128
     super(DeepLabV3, self).__init__()
     self.configer = configer
     self.num_classes = self.configer.get('data', 'num_classes')
     backbone = ModuleHelper.get_backbone(
         backbone=self.configer.get('network.backbone'),
         pretrained=self.configer.get('network.pretrained'))
     # Stage 1 bundles the stem plus layers 1-3; stage 2 is layer 4 alone.
     self.stage1 = nn.Sequential(
         backbone.conv1, backbone.bn1, backbone.relu1,
         backbone.conv2, backbone.bn2, backbone.relu2,
         backbone.conv3, backbone.bn3, backbone.relu3,
         backbone.maxpool, backbone.layer1, backbone.layer2, backbone.layer3)
     self.stage2 = backbone.layer4
     # resnet18 tops out at 512 channels; the deeper variants at 2048.
     num_features = 512 if 'resnet18' in self.configer.get('network.backbone') else 2048
     # ASPP context module followed by a 1x1 classifier over its 512 channels.
     self.head = nn.Sequential(
         ASPPModule(num_features,
                    norm_type=self.configer.get('network', 'norm_type')),
         nn.Conv2d(512, self.num_classes,
                   kernel_size=1, stride=1, padding=0, bias=True))
     # Auxiliary classifier on the 1024-channel layer-3 features.
     self.dsn = nn.Sequential(
         nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
         ModuleHelper.BNReLU(512, norm_type=self.configer.get('network', 'norm_type')),
         nn.Dropout2d(0.1),
         nn.Conv2d(512, self.num_classes,
                   kernel_size=1, stride=1, padding=0, bias=True))
     self.valid_loss_dict = configer.get('loss', 'loss_weights',
                                         configer.get('loss.loss_type'))