コード例 #1
0
    def __init__(self, in_dim, out_dim, scale):
        """Build attention, SE-gated dilation, fusion and projection branches.

        Args:
            in_dim: channel count of the incoming feature map.
            out_dim: channel count produced by every branch.
            scale: downscale factor forwarded to SelfAttentionModule.
        """
        super(sa_MagicModule, self).__init__()

        # Self-attention branch: 3x3 conv -> sync ABN -> self-attention.
        self.atte_branch = nn.Sequential(
            nn.Conv2d(in_dim, out_dim, kernel_size=3, padding=1, dilation=1, bias=False),
            InPlaceABNSync(out_dim),
            SelfAttentionModule(in_dim=out_dim, out_dim=out_dim,
                                key_dim=out_dim // 2, value_dim=out_dim, scale=scale))

        # TODO: change SE Module to Channel Attention Module
        # Plain 1x1 projection branch with SE channel gating.
        self.dilation_x = nn.Sequential(
            nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(out_dim),
            SEModule(out_dim, reduction=16))

        # Context-contrasted branches at growing rates, each SE-gated.
        self.dilation_0 = nn.Sequential(ContextContrastedModule(in_dim, out_dim, rate=6),
                                        SEModule(out_dim, reduction=16))
        self.dilation_1 = nn.Sequential(ContextContrastedModule(in_dim, out_dim, rate=12),
                                        SEModule(out_dim, reduction=16))
        self.dilation_2 = nn.Sequential(ContextContrastedModule(in_dim, out_dim, rate=18),
                                        SEModule(out_dim, reduction=16))
        self.dilation_3 = nn.Sequential(ContextContrastedModule(in_dim, out_dim, rate=24),
                                        SEModule(out_dim, reduction=16))

        # Fuses the six concatenated branch outputs back to out_dim channels.
        self.head_conv = nn.Sequential(
            nn.Conv2d(out_dim * 6, out_dim, kernel_size=1, padding=0, bias=False),
            InPlaceABNSync(out_dim))
        self.refine = nn.Sequential(
            nn.Conv2d(out_dim, out_dim, kernel_size=3, stride=1, padding=1, bias=False),
            InPlaceABNSync(out_dim))
        # Maps the stacked branches to 6 channels — presumably per-branch
        # selection logits; confirm against forward().
        self.project = nn.Conv2d(6 * out_dim, 6, kernel_size=1, padding=0, bias=True)
コード例 #2
0
    def __init__(self, in_dim, out_dim, scale):
        """Build the attention branch, four SE-gated dilation branches and
        the fusion head.

        Args:
            in_dim: channel count of the incoming feature map.
            out_dim: channel count produced by every branch.
            scale: downscale factor forwarded to SelfAttentionModule.
        """
        super(SEOCModule, self).__init__()

        # Attention branch: 3x3 conv (note bias=True here) -> ABN -> self-attention.
        self.atte_branch = nn.Sequential(
            nn.Conv2d(in_dim, out_dim, kernel_size=3, padding=1, dilation=1, bias=True),
            InPlaceABNSync(out_dim),
            SelfAttentionModule(in_dim=out_dim, out_dim=out_dim,
                                key_dim=out_dim // 2, value_dim=out_dim, scale=scale))

        # dilation_0 is a plain 1x1 projection; dilation_1..3 are atrous 3x3
        # convs at rates 12/24/36 (padding == dilation keeps spatial size).
        self.dilation_0 = nn.Sequential(
            nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(out_dim),
            SEModule(out_dim, reduction=16))
        for idx, rate in enumerate((12, 24, 36), start=1):
            branch = nn.Sequential(
                nn.Conv2d(in_dim, out_dim, kernel_size=3,
                          padding=rate, dilation=rate, bias=False),
                InPlaceABNSync(out_dim),
                SEModule(out_dim, reduction=16))
            # setattr registers each branch as a submodule, same as direct
            # attribute assignment would.
            setattr(self, 'dilation_%d' % idx, branch)

        # Fuses the five concatenated branch outputs down to out_dim channels.
        self.head_conv = nn.Sequential(
            nn.Conv2d(out_dim * 5, out_dim, kernel_size=1, stride=1, padding=0, bias=False),
            InPlaceABNSync(out_dim))
コード例 #3
0
 def __init__(self, fbody_cls):
     """Build a two-conv SE-gated refinement head plus a learnable fusion
     scalar. Note: fbody_cls is accepted for interface compatibility but is
     not used in this constructor.
     """
     super(AlphaFBDecoder, self).__init__()
     stages = [
         nn.Conv2d(512, 256, kernel_size=3, padding=1, stride=1, bias=False),
         BatchNorm2d(256),
         nn.ReLU(inplace=False),
         nn.Conv2d(256, 256, kernel_size=1, padding=0, stride=1, bias=False),
         BatchNorm2d(256),
         nn.ReLU(inplace=False),
         SEModule(256, reduction=16),
     ]
     self.conv1 = nn.Sequential(*stages)
     # Learnable scalar weight, initialised to 1.
     self.alpha_fb = nn.Parameter(torch.ones(1))
コード例 #4
0
    def __init__(self, num_classes):
        """Build two conv stages and a learnable fusion scalar.

        Note: num_classes is accepted for interface compatibility but is not
        used anywhere in this constructor (the classifier conv is disabled).
        """
        super(DecoderModule, self).__init__()
        # 3x3 conv keeping 512 channels.
        self.conv0 = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=3, padding=1, dilation=1, bias=False),
            BatchNorm2d(512),
            nn.ReLU(inplace=False))
        # 3x3 reduction to 256 channels with SE channel gating.
        self.conv1 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1, dilation=1, bias=False),
            BatchNorm2d(256),
            nn.ReLU(inplace=False),
            SEModule(256, reduction=16))
        # Learnable scalar weight, initialised to 1.
        self.alpha = nn.Parameter(torch.ones(1))
コード例 #5
0
 def __init__(self, fbody_cls):
     """Build the full-body decoder: low-level reduction, refinement stack,
     and an fbody_cls-way 1x1 classifier.
     """
     super(FBodyDecoder, self).__init__()
     # 1x1 reduction of a 256-channel feature down to 48 channels.
     self.conv0 = nn.Sequential(
         nn.Conv2d(256, 48, kernel_size=1, stride=1, padding=0, dilation=1, bias=False),
         BatchNorm2d(48),
         nn.ReLU(inplace=False))
     # Refinement over a 304-channel input — presumably the concatenation of
     # a 256- and the 48-channel map (256 + 48 = 304); confirm in forward().
     self.conv1 = nn.Sequential(
         nn.Conv2d(304, 256, kernel_size=3, padding=1, stride=1, bias=False),
         BatchNorm2d(256),
         nn.ReLU(inplace=False),
         nn.Conv2d(256, 256, kernel_size=1, padding=0, stride=1, bias=False),
         BatchNorm2d(256),
         nn.ReLU(inplace=False),
         SEModule(256, reduction=16))
     # Final per-pixel projection to fbody_cls channels.
     self.project = nn.Conv2d(256, fbody_cls, kernel_size=1, padding=0, stride=1, bias=True)
コード例 #6
0
    def __init__(self, in_dim, out_dim, scale):
        """Build attention, SE-gated projection and context-contrasted
        branches plus the fusion head.

        Args:
            in_dim: channel count of the incoming feature map.
            out_dim: channel count produced by every branch.
            scale: downscale factor forwarded to SelfAttentionModule.
        """
        super(MagicModule, self).__init__()
        # Attention branch: 3x3 conv (bias=True) -> sync ABN -> self-attention.
        self.atte_branch = nn.Sequential(
            nn.Conv2d(in_dim, out_dim, kernel_size=3, padding=1, dilation=1, bias=True),
            InPlaceABNSync(out_dim),
            SelfAttentionModule(in_dim=out_dim, out_dim=out_dim,
                                key_dim=out_dim // 2, value_dim=out_dim, scale=scale))
        # added
        # Plain 1x1 projection branch with SE channel gating.
        self.dilation_x = nn.Sequential(
            nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(out_dim),
            SEModule(out_dim, reduction=16))

        # Context-contrasted branches at rates 6/12/18/24, each SE-gated.
        # setattr registers each as a submodule just like direct assignment.
        for idx, rate in enumerate((6, 12, 18, 24)):
            setattr(self, 'dilation_%d' % idx,
                    nn.Sequential(ContextContrastedModule(in_dim, out_dim, rate=rate),
                                  SEModule(out_dim, reduction=16)))

        # Fuses the six concatenated branch outputs back to out_dim channels.
        self.head_conv = nn.Sequential(
            nn.Conv2d(out_dim * 6, out_dim, kernel_size=1, padding=0, bias=False),
            InPlaceABNSync(out_dim))
コード例 #7
0
 def __init__(self, hbody_cls):
     """Build a single sequential head: two convs with BN/ReLU, SE gating,
     and a final 1x1 classifier producing hbody_cls channels.
     """
     super(AlphaHBDecoder, self).__init__()
     self.conv1 = nn.Sequential(
         nn.Conv2d(512, 256, kernel_size=3, padding=1, stride=1, bias=False),
         BatchNorm2d(256),
         nn.ReLU(inplace=False),
         nn.Conv2d(256, 256, kernel_size=1, padding=0, stride=1, bias=False),
         BatchNorm2d(256),
         nn.ReLU(inplace=False),
         SEModule(256, reduction=16),
         # Classifier conv is part of the same Sequential here.
         nn.Conv2d(256, hbody_cls, kernel_size=1, padding=0, stride=1, bias=True))
コード例 #8
0
    def __init__(self, in_dim, num_classes):
        """Build one sequential decoder head: two 3x3 convs with BN/ReLU,
        SE gating, then a 1x1 classifier with num_classes outputs.

        Args:
            in_dim: channel count of the incoming feature map.
            num_classes: channel count of the final classifier conv.
        """
        super(DecoderModule, self).__init__()

        self.conv1 = nn.Sequential(
            nn.Conv2d(in_dim, 256, kernel_size=3, padding=1, stride=1, bias=False),
            BatchNorm2d(256),
            nn.ReLU(inplace=False),
            nn.Conv2d(256, 256, kernel_size=3, padding=1, dilation=1, bias=False),
            BatchNorm2d(256),
            nn.ReLU(inplace=False),
            SEModule(256, reduction=16),
            # Final classifier: the only conv in this head with a bias.
            nn.Conv2d(256, num_classes, kernel_size=1, padding=0, dilation=1, bias=True))
コード例 #9
0
    def __init__(self, in_dim, out_dim, scale=1):
        """Build an SE-gated ASPP with a pixel-wise branch-weighting head.

        Args:
            in_dim: channel count of the incoming feature map.
            out_dim: channel count produced by every branch.
            scale: accepted for interface compatibility; not used in this
                constructor.
        """
        super(ASPPModule, self).__init__()

        def atrous_branch(rate):
            # 1x1 reduction followed by a 3x3 atrous conv at `rate`, SE-gated.
            # padding == dilation preserves the spatial size for k=3.
            return nn.Sequential(
                nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, dilation=1, bias=False),
                InPlaceABNSync(out_dim),
                nn.Conv2d(out_dim, out_dim, kernel_size=3,
                          padding=rate, dilation=rate, bias=False),
                InPlaceABNSync(out_dim),
                SEModule(out_dim, reduction=16))

        # Image-level branch: global average pool -> 1x1 conv -> ABN.
        self.gap = nn.Sequential(nn.AdaptiveAvgPool2d(1),
                                 nn.Conv2d(in_dim, out_dim, 1, bias=False),
                                 InPlaceABNSync(out_dim))
        # Plain 1x1 branch with SE gating.
        self.dilation_0 = nn.Sequential(
            nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(out_dim),
            SEModule(out_dim, reduction=16))
        self.dilation_1 = atrous_branch(6)
        self.dilation_2 = atrous_branch(12)
        self.dilation_3 = atrous_branch(18)

        # Produces 5 sigmoid-gated maps from the input plus the 5 branch
        # outputs — presumably per-pixel branch weights; confirm in forward().
        self.psaa_conv = nn.Sequential(
            nn.Conv2d(in_dim + 5 * out_dim, out_dim, 1, padding=0, bias=False),
            InPlaceABNSync(out_dim),
            nn.Conv2d(out_dim, 5, 1, bias=True),
            nn.Sigmoid())

        # Fuses the five concatenated branch outputs back to out_dim channels.
        self.project = nn.Sequential(
            nn.Conv2d(out_dim * 5, out_dim, kernel_size=1, padding=0, bias=False),
            InPlaceABNSync(out_dim))
コード例 #10
0
    def __init__(self, in_dim, out_dim):
        """Build an SE-gated ASPP variant whose fused output feeds a
        position-attention module (pam0).

        Args:
            in_dim: channel count of the incoming feature map.
            out_dim: channel count produced by every branch.
        """
        super(ASPPModule2, self).__init__()

        def atrous_branch(rate):
            # 1x1 reduction followed by a 3x3 atrous conv at `rate`, SE-gated.
            # padding == dilation preserves the spatial size for k=3.
            return nn.Sequential(
                nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, dilation=1, bias=False),
                InPlaceABNSync(out_dim),
                nn.Conv2d(out_dim, out_dim, kernel_size=3,
                          padding=rate, dilation=rate, bias=False),
                InPlaceABNSync(out_dim),
                SEModule(out_dim, reduction=16))

        # Image-level branch: global average pool -> 1x1 conv -> ABN.
        self.gap = nn.Sequential(nn.AdaptiveAvgPool2d(1),
                                 nn.Conv2d(in_dim, out_dim, 1, bias=False),
                                 InPlaceABNSync(out_dim))
        # Plain 1x1 branch with SE gating.
        self.dilation_0 = nn.Sequential(
            nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(out_dim),
            SEModule(out_dim, reduction=16))
        self.dilation_1 = atrous_branch(6)
        self.dilation_2 = atrous_branch(12)
        self.dilation_3 = atrous_branch(18)

        # Fuses the five concatenated branch outputs back to out_dim channels.
        self.project = nn.Sequential(
            nn.Conv2d(out_dim * 5, out_dim, kernel_size=1, padding=0, bias=False),
            InPlaceABNSync(out_dim))
        # Position attention over the fused out_dim-channel map.
        self.pam0 = PAM_Module(in_dim=out_dim,
                               key_dim=out_dim // 8,
                               value_dim=out_dim,
                               out_dim=out_dim)