Example #1
    def __init__(self,
                 in_channels,
                 out_channels,
                 reduction=1,
                 norm_layer=nn.BatchNorm2d,
                 **kwargs):
        super(FeatureFusion, self).__init__()
        # 1x1 conv fuses the concatenated spatial- and context-path features
        self.conv1x1 = _ConvBNReLU(in_channels,
                                   out_channels,
                                   1,
                                   1,
                                   0,
                                   norm_layer=norm_layer,
                                   **kwargs)
        # SE-style channel attention: global pool -> 1x1 bottleneck -> sigmoid gate
        self.channel_attention = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            _ConvBNReLU(out_channels,
                        out_channels // reduction,
                        1,
                        1,
                        0,
                        norm_layer=norm_layer),
            _ConvBNReLU(out_channels // reduction,
                        out_channels,
                        1,
                        1,
                        0,
                        norm_layer=norm_layer),
            nn.Sigmoid())
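Every example on this page calls a shared _ConvBNReLU helper that is not shown. A minimal sketch of such a helper, assuming the (in_channels, out_channels, kernel_size, stride, padding) positional order these calls use; the groups and relu6 arguments anticipate the MobileNet examples further below:

import torch.nn as nn

class _ConvBNReLU(nn.Module):
    """Conv2d -> norm -> ReLU. A sketch, not necessarily the repo's exact code."""

    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, groups=1, relu6=False,
                 norm_layer=nn.BatchNorm2d, **kwargs):
        super(_ConvBNReLU, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              stride, padding, groups=groups, bias=False)
        self.bn = norm_layer(out_channels)
        self.relu = nn.ReLU6(True) if relu6 else nn.ReLU(True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))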
Example #2
    def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs):
        super(SpatialPath, self).__init__()
        inter_channels = 64
        # three stride-2 convs: output is 1/8 of the input resolution
        self.conv7x7 = _ConvBNReLU(in_channels, inter_channels, 7, 2, 3, norm_layer=norm_layer)
        self.conv3x3_1 = _ConvBNReLU(inter_channels, inter_channels, 3, 2, 1, norm_layer=norm_layer)
        self.conv3x3_2 = _ConvBNReLU(inter_channels, inter_channels, 3, 2, 1, norm_layer=norm_layer)
        self.conv1x1 = _ConvBNReLU(inter_channels, out_channels, 1, 1, 0, norm_layer=norm_layer)
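The three stride-2 convolutions give the spatial path an output stride of 8 (2 × 2 × 2). A quick shape check, assuming the _ConvBNReLU sketch above:

import torch
import torch.nn as nn  # _ConvBNReLU as sketched under Example #1

path = nn.Sequential(
    _ConvBNReLU(3, 64, 7, 2, 3),    # 1/2
    _ConvBNReLU(64, 64, 3, 2, 1),   # 1/4
    _ConvBNReLU(64, 64, 3, 2, 1),   # 1/8
    _ConvBNReLU(64, 128, 1, 1, 0))  # 1x1 projection keeps resolution

x = torch.randn(1, 3, 224, 224)
print(path(x).shape)  # torch.Size([1, 128, 28, 28]); 224 / 8 = 28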
Example #3
    def __init__(self, backbone='resnet18', pretrained_base=True, norm_layer=nn.BatchNorm2d, **kwargs):
        super(ContextPath, self).__init__()
        # only the ResNet-18 backbone is wired up in this variant
        if backbone == 'resnet18':
            pretrained = resnet18(pretrained=pretrained_base, **kwargs)
        else:
            raise RuntimeError('unknown backbone: {}'.format(backbone))
        # reuse the ResNet-18 stem and residual stages as the context encoder
        self.conv1 = pretrained.conv1
        self.bn1 = pretrained.bn1
        self.relu = pretrained.relu
        self.maxpool = pretrained.maxpool
        self.layer1 = pretrained.layer1
        self.layer2 = pretrained.layer2
        self.layer3 = pretrained.layer3
        self.layer4 = pretrained.layer4

        inter_channels = 128
        # global average pooling supplies the coarsest context feature
        self.global_context = _GlobalAvgPooling(512, inter_channels, norm_layer)

        # one ARM per fused stage: layer4 (512 ch, 1/32) and layer3 (256 ch, 1/16)
        self.arms = nn.ModuleList(
            [AttentionRefinmentModule(512, inter_channels, norm_layer, **kwargs),
             AttentionRefinmentModule(256, inter_channels, norm_layer, **kwargs)]
        )
        self.refines = nn.ModuleList(
            [_ConvBNReLU(inter_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer),
             _ConvBNReLU(inter_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer)]
        )
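_GlobalAvgPooling is also not shown on this page. A plausible minimal version, assuming it pools to 1×1 and projects 512 → 128 channels as the call above suggests (the exact layer composition is an assumption):

import torch.nn as nn  # _ConvBNReLU as sketched under Example #1

class _GlobalAvgPooling(nn.Module):
    def __init__(self, in_channels, out_channels, norm_layer, **kwargs):
        super(_GlobalAvgPooling, self).__init__()
        self.gap = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),  # (N, C, 1, 1) global context
            _ConvBNReLU(in_channels, out_channels, 1, norm_layer=norm_layer))

    def forward(self, x):
        return self.gap(x)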
Example #4
    def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs):
        super(AttentionRefinmentModule, self).__init__()
        self.conv3x3 = _ConvBNReLU(in_channels, out_channels, 3, 1, 1, norm_layer=norm_layer)
        # channel attention: global pool -> 1x1 conv -> sigmoid gate
        self.channel_attention = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            _ConvBNReLU(out_channels, out_channels, 1, 1, 0, norm_layer=norm_layer),
            nn.Sigmoid()
        )
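The constructor only declares the layers; a forward pass consistent with this layout (an assumption, since it is not shown here) would gate the 3×3 output by its own globally pooled statistics:

    def forward(self, x):
        x = self.conv3x3(x)
        attention = self.channel_attention(x)  # (N, C, 1, 1), values in (0, 1)
        return x * attention                   # gate broadcasts over H and W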
Example #5
    def __init__(self, in_channels, reduced_channels, out_channels, norm_layer, **kwargs):
        super(_AttentionGeneration, self).__init__()
        # reduce channels, then predict per-position attention weights
        self.conv_reduce = _ConvBNReLU(in_channels, reduced_channels, 1, norm_layer=norm_layer)
        self.attention = nn.Sequential(
            _ConvBNReLU(reduced_channels, reduced_channels, 1, norm_layer=norm_layer),
            nn.Conv2d(reduced_channels, out_channels, 1, bias=False))

        self.reduced_channels = reduced_channels
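Here out_channels is meant to equal the number of spatial positions H·W, so each position predicts one weight for every other position (see the 3600 = 60 × 60 in Example #6 below). One plausible forward pass, a sketch rather than the repo's exact code, with torch assumed imported at module level:

    def forward(self, x):
        fm = self.conv_reduce(x)                    # (N, reduced, H, W)
        attention = self.attention(fm)              # (N, H*W, H, W)
        n, c, h, w = attention.size()
        attention = attention.view(n, c, -1)        # (N, H*W, H*W); requires c == h*w
        fm = fm.view(n, self.reduced_channels, -1)  # (N, reduced, H*W)
        fm = torch.bmm(fm, torch.softmax(attention, dim=1))
        return fm.view(n, self.reduced_channels, h, w)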
Example #6
    def __init__(self, nclass, norm_layer=nn.BatchNorm2d, **kwargs):
        super(_PSAHead, self).__init__()
        # psa_out_channels = (crop_size // 8) ** 2, i.e. 60 ** 2 = 3600 for a 480x480 crop
        self.psa = _PointwiseSpatialAttention(2048, 3600, norm_layer)

        self.conv_post = _ConvBNReLU(1024, 2048, 1, norm_layer=norm_layer)
        self.project = nn.Sequential(
            _ConvBNReLU(4096, 512, 3, padding=1, norm_layer=norm_layer),
            nn.Dropout2d(0.1, False), nn.Conv2d(512, nclass, 1))
Example #7
    def __init__(self, in_channels, inter_channels, nclass, norm_layer=nn.BatchNorm2d, **kwargs):
        super(_BiSeHead, self).__init__()
        # score head: 3x3 conv -> dropout -> 1x1 classifier
        self.block = nn.Sequential(
            _ConvBNReLU(in_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer),
            nn.Dropout(0.1),
            nn.Conv2d(inter_channels, nclass, 1)
        )
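The head emits logits at feature resolution; callers then upsample to the input size. A usage sketch with illustrative numbers (the 128 input channels, 19 classes, and 1/8-resolution features are assumptions, not values from the source):

import torch
import torch.nn as nn  # _ConvBNReLU as sketched under Example #1
import torch.nn.functional as F

head = nn.Sequential(
    _ConvBNReLU(128, 64, 3, 1, 1),
    nn.Dropout(0.1),
    nn.Conv2d(64, 19, 1))

feats = torch.randn(2, 128, 60, 60)
logits = head(feats)  # (2, 19, 60, 60)
logits = F.interpolate(logits, size=(480, 480),
                       mode='bilinear', align_corners=True)
print(logits.shape)   # torch.Size([2, 19, 480, 480])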
Example #8
    def __init__(self, num_classes=1000, multiplier=1.0, norm_layer=nn.BatchNorm2d, **kwargs):
        super(MobileNet, self).__init__()
        conv_dw_setting = [
            # c (output channels), n (repeats), s (first stride)
            [64, 1, 1],
            [128, 2, 2],
            [256, 2, 2],
            [512, 6, 2],
            [1024, 2, 2]]
        input_channels = int(32 * multiplier) if multiplier > 1.0 else 32
        features = [_ConvBNReLU(3, input_channels, 3, 2, 1, norm_layer=norm_layer)]

        for c, n, s in conv_dw_setting:
            out_channels = int(c * multiplier)
            for i in range(n):
                stride = s if i == 0 else 1
                features.append(_DepthwiseConv(input_channels, out_channels, stride, norm_layer))
                input_channels = out_channels
        features.append(nn.AdaptiveAvgPool2d(1))
        self.features = nn.Sequential(*features)

        self.classifier = nn.Linear(int(1024 * multiplier), num_classes)

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)
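_DepthwiseConv is the depthwise-separable block MobileNet is built from: a per-channel 3×3 conv followed by a 1×1 pointwise conv. A minimal sketch, assuming the (in, out, stride, norm_layer) signature used above and the _ConvBNReLU sketch from Example #1:

import torch.nn as nn

class _DepthwiseConv(nn.Module):
    def __init__(self, in_channels, out_channels, stride, norm_layer=nn.BatchNorm2d):
        super(_DepthwiseConv, self).__init__()
        self.conv = nn.Sequential(
            # depthwise: groups == in_channels, one 3x3 filter per channel
            _ConvBNReLU(in_channels, in_channels, 3, stride, 1,
                        groups=in_channels, norm_layer=norm_layer),
            # pointwise: 1x1 conv mixes channels
            _ConvBNReLU(in_channels, out_channels, 1, norm_layer=norm_layer))

    def forward(self, x):
        return self.conv(x)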
Example #9
    def __init__(self, in_channels, nclass, norm_layer=nn.BatchNorm2d, **kwargs):
        super(APNModule, self).__init__()
        # cascaded stride-2 branches with growing kernel size build the pyramid
        self.conv1 = _ConvBNReLU(in_channels, in_channels, 3, 2, 1, norm_layer=norm_layer)
        self.conv2 = _ConvBNReLU(in_channels, in_channels, 5, 2, 2, norm_layer=norm_layer)
        self.conv3 = _ConvBNReLU(in_channels, in_channels, 7, 2, 3, norm_layer=norm_layer)
        # 1x1 projections to nclass at each pyramid level
        self.level1 = _ConvBNReLU(in_channels, nclass, 1, norm_layer=norm_layer)
        self.level2 = _ConvBNReLU(in_channels, nclass, 1, norm_layer=norm_layer)
        self.level3 = _ConvBNReLU(in_channels, nclass, 1, norm_layer=norm_layer)
        self.level4 = _ConvBNReLU(in_channels, nclass, 1, norm_layer=norm_layer)
        # global-pooling branch
        self.level5 = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            _ConvBNReLU(in_channels, nclass, 1))
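The pyramid itself is assembled in the forward pass by upsampling each coarser level and accumulating. Since the forward is not shown here, the following is a sketch of the general cascaded-pyramid pattern, not the repo's exact code; torch.nn.functional is assumed imported as F:

    def forward(self, x):
        size = x.size()[2:]
        b1 = self.conv1(x)   # 1/2
        b2 = self.conv2(b1)  # 1/4
        b3 = self.conv3(b2)  # 1/8
        out = self.level1(b3)
        out = F.interpolate(out, b2.size()[2:], mode='bilinear', align_corners=True)
        out = out + self.level2(b2)
        out = F.interpolate(out, b1.size()[2:], mode='bilinear', align_corners=True)
        out = out + self.level3(b1)
        out = F.interpolate(out, size, mode='bilinear', align_corners=True)
        out = out * self.level4(x)  # pyramid acts as attention over the projection
        # global branch: (N, nclass, 1, 1) broadcasts back over the grid
        out = out + F.interpolate(self.level5(x), size, mode='bilinear', align_corners=True)
        return out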
Example #10
    def __init__(self,
                 nclass,
                 backbone="",
                 aux=False,
                 jpu=False,
                 pretrained_base=False,
                 **kwargs):
        super(DFANet, self).__init__()
        self.pretrained = get_xception_a(pretrained_base, **kwargs)

        self.enc2_2 = Enc(240, 48, 4, **kwargs)
        self.enc3_2 = Enc(144, 96, 6, **kwargs)
        self.enc4_2 = Enc(288, 192, 4, **kwargs)
        self.fca_2 = FCAttention(192, **kwargs)

        self.enc2_3 = Enc(240, 48, 4, **kwargs)
        self.enc3_3 = Enc(144, 96, 6, **kwargs)
        self.enc3_4 = Enc(288, 192, 4, **kwargs)
        self.fca_3 = FCAttention(192, **kwargs)

        self.enc2_1_reduce = _ConvBNReLU(48, 32, 1, **kwargs)
        self.enc2_2_reduce = _ConvBNReLU(48, 32, 1, **kwargs)
        self.enc2_3_reduce = _ConvBNReLU(48, 32, 1, **kwargs)
        self.conv_fusion = _ConvBNReLU(32, 32, 1, **kwargs)

        self.fca_1_reduce = _ConvBNReLU(192, 32, 1, **kwargs)
        self.fca_2_reduce = _ConvBNReLU(192, 32, 1, **kwargs)
        self.fca_3_reduce = _ConvBNReLU(192, 32, 1, **kwargs)
        self.conv_out = nn.Conv2d(32, nclass, 1)

        # modules trained from scratch rather than loaded with the backbone
        self.__setattr__(
            "exclusive",
            [
                "enc2_2",
                "enc3_2",
                "enc4_2",
                "fca_2",
                "enc2_3",
                "enc3_3",
                "enc3_4",
                "fca_3",
                "enc2_1_reduce",
                "enc2_2_reduce",
                "enc2_3_reduce",
                "conv_fusion",
                "fca_1_reduce",
                "fca_2_reduce",
                "fca_3_reduce",
                "conv_out",
            ],
        )
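The exclusive attribute lists the modules that are not initialized from the pretrained backbone; training scripts in codebases like this typically use such a list to give those parameters their own, often larger, learning rate. A hedged usage sketch (the 10x factor and function name are illustrative, not taken from the source):

import itertools
import torch

def make_param_groups(model, base_lr):
    # parameters trained from scratch get a larger learning rate
    scratch = itertools.chain(
        *[getattr(model, name).parameters() for name in model.exclusive])
    return [
        {'params': model.pretrained.parameters(), 'lr': base_lr},
        {'params': scratch, 'lr': base_lr * 10},
    ]

# optimizer = torch.optim.SGD(make_param_groups(model, 0.01), momentum=0.9)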
Example #11
    def __init__(self,
                 num_classes=1000,
                 multiplier=1.0,
                 norm_layer=nn.BatchNorm2d,
                 **kwargs):
        super(MobileNetV2, self).__init__()
        inverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1]
        ]
        # building first layer
        input_channels = int(32 * multiplier) if multiplier > 1.0 else 32
        last_channels = int(1280 * multiplier) if multiplier > 1.0 else 1280
        features = [
            _ConvBNReLU(3,
                        input_channels,
                        3,
                        2,
                        1,
                        relu6=True,
                        norm_layer=norm_layer)
        ]

        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            out_channels = int(c * multiplier)
            for i in range(n):
                stride = s if i == 0 else 1
                features.append(
                    InvertedResidual(input_channels, out_channels, stride, t,
                                     norm_layer))
                input_channels = out_channels

        # building last several layers
        features.append(
            _ConvBNReLU(input_channels,
                        last_channels,
                        1,
                        relu6=True,
                        norm_layer=norm_layer))
        features.append(nn.AdaptiveAvgPool2d(1))
        self.features = nn.Sequential(*features)

        self.classifier = nn.Sequential(nn.Dropout2d(0.2),
                                        nn.Linear(last_channels, num_classes))

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
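InvertedResidual is MobileNetV2's expand -> depthwise -> linear-project block, with a residual connection when the stride is 1 and the channel counts match. A minimal sketch consistent with the (in, out, stride, t, norm_layer) call above, reusing the _ConvBNReLU sketch from Example #1:

import torch.nn as nn

class InvertedResidual(nn.Module):
    def __init__(self, in_channels, out_channels, stride, expand_ratio,
                 norm_layer=nn.BatchNorm2d):
        super(InvertedResidual, self).__init__()
        self.use_res_connect = stride == 1 and in_channels == out_channels
        hidden = int(round(in_channels * expand_ratio))
        layers = []
        if expand_ratio != 1:
            # 1x1 expansion
            layers.append(_ConvBNReLU(in_channels, hidden, 1, relu6=True,
                                      norm_layer=norm_layer))
        layers.extend([
            # 3x3 depthwise
            _ConvBNReLU(hidden, hidden, 3, stride, 1, groups=hidden,
                        relu6=True, norm_layer=norm_layer),
            # 1x1 linear projection, no activation
            nn.Conv2d(hidden, out_channels, 1, bias=False),
            norm_layer(out_channels)])
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out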
Example #12
    def __init__(self, backbone='resnet18', pretrained_base=True, norm_layer=nn.BatchNorm2d, **kwargs):
        super(ContextPath, self).__init__()
        self.backbone = backbone
        if backbone == 'resnet18':
            pretrained = resnet18(pretrained=pretrained_base, **kwargs)
        elif backbone == 'resnet50':
            pretrained = resnet50(pretrained=pretrained_base, **kwargs)
        elif backbone == 'xception':
            pretrained = get_xception_71(pretrained=pretrained_base, **kwargs)
        elif backbone == 'mobilenet':
            pretrained = get_mobilenet_v2(pretrained=pretrained_base, **kwargs)
        else:
            raise RuntimeError('unknown backbone: {}'.format(backbone))
        if backbone == 'xception':
            self.conv1 = pretrained.conv1
            self.bn1 = pretrained.bn1
            self.relu = pretrained.relu
            self.conv2 = pretrained.conv2
            self.bn2 = pretrained.bn2
            self.block1 = pretrained.block1
            self.block2_1 = pretrained.block2_1
            self.block2_2 = pretrained.block2_2
            self.block2 = pretrained.block2
            self.block3 = pretrained.block3
            self.midflow = pretrained.midflow
            self.block20 = pretrained.block20
            self.conv3 = pretrained.conv3
            self.bn3 = pretrained.bn3
            self.conv4 = pretrained.conv4
            self.bn4 = pretrained.bn4
            self.conv5 = pretrained.conv5
            self.bn5 = pretrained.bn5
            self.avgpool = pretrained.avgpool
            self.fc = pretrained.fc
        elif backbone == 'mobilenet':
            self.down8 = pretrained.down8
            self.down16 = pretrained.down16
            self.down32 = pretrained.down32
        else:  # resnet18 / resnet50
            self.conv1 = pretrained.conv1
            self.bn1 = pretrained.bn1
            self.relu = pretrained.relu
            self.maxpool = pretrained.maxpool
            self.layer1 = pretrained.layer1
            self.layer2 = pretrained.layer2
            self.layer3 = pretrained.layer3
            self.layer4 = pretrained.layer4

        inter_channels = 128
        in_channels = 512
        second_in_channels = 256
        third_in_channels = 728
        if backbone in ('resnet50', 'xception'):
            in_channels = 2048
            second_in_channels = 728
        elif backbone == 'mobilenet':
            in_channels = 1280
            second_in_channels = 96
        self.global_context = _GlobalAvgPooling(in_channels, inter_channels, norm_layer)

        self.arms = nn.ModuleList(
            [AttentionRefinmentModule(in_channels, inter_channels, norm_layer, **kwargs),
             AttentionRefinmentModule(second_in_channels, inter_channels, norm_layer, **kwargs),
             AttentionRefinmentModule(third_in_channels, inter_channels, norm_layer, **kwargs)]
        )
        self.refines = nn.ModuleList(
            [_ConvBNReLU(inter_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer),
             _ConvBNReLU(inter_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer),
             _ConvBNReLU(inter_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer)]
        )
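The constructor registers one ARM/refine pair per fused backbone stage; the forward pass (not shown) typically walks from the deepest stage upward: add the global context to the deepest feature, gate each stage with its ARM, upsample, refine, and merge with the next shallower stage. A sketch of that pattern for the two-ARM resnet18 variant of Example #3; the three-ARM version here follows the same cascade with one more stage, and the stage wiring is an assumption:

import torch.nn.functional as F

    def forward(self, x):
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        c1 = self.layer1(x)   # 1/4
        c2 = self.layer2(c1)  # 1/8
        c3 = self.layer3(c2)  # 1/16, 256 channels
        c4 = self.layer4(c3)  # 1/32, 512 channels

        global_ctx = self.global_context(c4)  # (N, 128, 1, 1), broadcasts on add
        last = self.arms[0](c4) + global_ctx
        last = F.interpolate(last, c3.size()[2:], mode='bilinear', align_corners=True)
        last = self.refines[0](last)

        out = self.arms[1](c3) + last
        out = F.interpolate(out, c2.size()[2:], mode='bilinear', align_corners=True)
        out = self.refines[1](out)
        return out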