Example 1
    def __init__(self, in_dim, out_dim, scale):
        super(sa_MagicModule, self).__init__()
        self.atte_branch = nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=3, padding=1, dilation=1, bias=False),
                                         InPlaceABNSync(out_dim),
                                         SelfAttentionModule(in_dim=out_dim, out_dim=out_dim, key_dim=out_dim // 2,
                                                             value_dim=out_dim, scale=scale))
        # TODO: change SE Module to Channel Attention Module
        self.dilation_x = nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, dilation=1, bias=False),
                                        InPlaceABNSync(out_dim), SEModule(out_dim, reduction=16))

        # self.dilation_x = nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, dilation=1, bias=False),
        #                                 InPlaceABNSync(out_dim), ChannelAttentionModule(out_dim))

        self.dilation_0 = nn.Sequential(ContextContrastedModule(in_dim, out_dim, rate=6),
                                        SEModule(out_dim, reduction=16))

        self.dilation_1 = nn.Sequential(ContextContrastedModule(in_dim, out_dim, rate=12),
                                        SEModule(out_dim, reduction=16))

        self.dilation_2 = nn.Sequential(ContextContrastedModule(in_dim, out_dim, rate=18),
                                        SEModule(out_dim, reduction=16))

        self.dilation_3 = nn.Sequential(ContextContrastedModule(in_dim, out_dim, rate=24),
                                        SEModule(out_dim, reduction=16))

        self.head_conv = nn.Sequential(nn.Conv2d(out_dim * 6, out_dim, kernel_size=1, padding=0, bias=False),
                                       InPlaceABNSync(out_dim)
                                       )
        self.refine = nn.Sequential(nn.Conv2d(out_dim, out_dim, kernel_size=3, stride=1, padding=1, bias=False),
                                       InPlaceABNSync(out_dim))
        self.project = nn.Conv2d(6 * out_dim, 6, kernel_size=1, padding=0, bias=True)
Example 2
    def __init__(self, features, out_features=512, sizes=(1, 2, 3, 6)):
        super(PSPModule, self).__init__()

        self.psp1 = nn.Sequential(
            nn.AdaptiveAvgPool2d(output_size=(sizes[0], sizes[0])),
            nn.Conv2d(features, out_features, kernel_size=1, bias=False),
            InPlaceABNSync(out_features))

        self.psp2 = nn.Sequential(
            nn.AdaptiveAvgPool2d(output_size=(sizes[1], sizes[1])),
            nn.Conv2d(features, out_features, kernel_size=1, bias=False),
            InPlaceABNSync(out_features))

        self.psp3 = nn.Sequential(
            nn.AdaptiveAvgPool2d(output_size=(sizes[2], sizes[2])),
            nn.Conv2d(features, out_features, kernel_size=1, bias=False),
            InPlaceABNSync(out_features))

        self.psp4 = nn.Sequential(
            nn.AdaptiveAvgPool2d(output_size=(sizes[3], sizes[3])),
            nn.Conv2d(features, out_features, kernel_size=1, bias=False),
            InPlaceABNSync(out_features))

        self.bottleneck = nn.Sequential(
            nn.Conv2d(features + len(sizes) * out_features,
                      out_features,
                      kernel_size=3,
                      padding=1,
                      bias=False), BatchNorm2d(out_features),
            nn.ReLU(inplace=False))
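The forward pass is not shown in these snippets; below is a minimal sketch of the pyramid-pooling forward this __init__ implies, written as a free function over a PSPModule instance. The bilinear mode and align_corners=True are assumptions.

import torch
import torch.nn.functional as F

def psp_forward(module, feats):
    # Each branch pools to a fixed grid, projects channels with a 1x1 conv,
    # then upsamples back to the input resolution.
    h, w = feats.size(2), feats.size(3)
    priors = [F.interpolate(stage(feats), size=(h, w), mode='bilinear',
                            align_corners=True)
              for stage in (module.psp1, module.psp2, module.psp3, module.psp4)]
    # Fuse the original features with the four pyramid levels.
    return module.bottleneck(torch.cat([feats] + priors, dim=1))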
Example 3
 def __init__(self, fb_cls):
     super(FBCombineBlock, self).__init__()
     self.conv1 = nn.Sequential(nn.Conv2d(fb_cls * 2, 24, kernel_size=1, padding=0, stride=1, bias=False),
                                InPlaceABNSync(24),
                                nn.Conv2d(24, 24, kernel_size=3, padding=1, stride=1, bias=False),
                                InPlaceABNSync(24),
                                nn.Conv2d(24, fb_cls, kernel_size=1, padding=0, stride=1, bias=True))
Example 4
 def __init__(self, in_dim, out_dim, key_dim, value_dim, scale=2):
     super(SelfAttentionModule, self).__init__()
     self.scale = scale
     self.in_dim = in_dim
     self.out_dim = out_dim
     self.key_dim = key_dim
     self.value_dim = value_dim
     self.pool = nn.MaxPool2d(kernel_size=(scale, scale))
     self.func_key = nn.Sequential(
         nn.Conv2d(in_channels=self.in_dim,
                   out_channels=self.key_dim,
                   kernel_size=1,
                   stride=1,
                   padding=0), InPlaceABNSync(self.key_dim))
     self.func_query = self.func_key  # query and key share one projection (tied weights)
     self.func_value = nn.Conv2d(in_channels=self.in_dim,
                                 out_channels=self.value_dim,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
     self.weights = nn.Conv2d(in_channels=self.value_dim,
                              out_channels=self.out_dim,
                              kernel_size=1,
                              stride=1,
                              padding=0)
     self.refine = nn.Sequential(
         nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0),
         InPlaceABNSync(out_dim))
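A minimal sketch of the scaled dot-product attention this module implies; the forward method is not shown, and where self.refine enters is an assumption left out here.

import torch
import torch.nn.functional as F

def self_attention_forward(module, x):
    n, _, h, w = x.size()
    x_down = module.pool(x) if module.scale > 1 else x  # cheaper keys/values
    query = module.func_query(x).view(n, module.key_dim, -1).permute(0, 2, 1)
    key = module.func_key(x_down).view(n, module.key_dim, -1)
    value = module.func_value(x_down).view(n, module.value_dim, -1).permute(0, 2, 1)
    # N x HW x hw similarity, scaled by 1/sqrt(key_dim) and row-normalized.
    sim = F.softmax(torch.bmm(query, key) * module.key_dim ** -0.5, dim=-1)
    context = torch.bmm(sim, value).permute(0, 2, 1).contiguous()
    return module.weights(context.view(n, module.value_dim, h, w))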
Example 5
    def __init__(self, in_dim, out_dim, scale, ncls):
        super(ASPOCCAENCModule, self).__init__()

        self.atte_branch = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=3,
                      padding=1,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_dim),
            SelfAttentionModule(in_dim=out_dim,
                                out_dim=out_dim,
                                key_dim=out_dim // 2,
                                value_dim=out_dim,
                                scale=scale))

        self.dilation_0 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_dim))

        self.dilation_1 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=3,
                      padding=12,
                      dilation=12,
                      bias=False), InPlaceABNSync(out_dim))

        self.dilation_2 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=3,
                      padding=24,
                      dilation=24,
                      bias=False), InPlaceABNSync(out_dim))

        self.dilation_3 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=3,
                      padding=36,
                      dilation=36,
                      bias=False), InPlaceABNSync(out_dim))

        self.gsecam = guided_SE_CAM_Module(in_dim, out_dim, out_dim,
                                           BatchNorm2d)

        # self.head_conv = nn.Sequential(nn.Conv2d(out_dim * 5, out_dim*2, kernel_size=1, padding=0, bias=False),
        #                                InPlaceABNSync(out_dim*2),
        #                                nn.Dropout2d(0.1))
        # self.head_conv = nn.Sequential(guided_CA_Module(out_dim*5, out_dim, out_dim, BatchNorm2d),
        #                                nn.Dropout2d(0.1))
        self.head_conv = nn.Sequential(
            guided_SE_CAM_Module(out_dim * 6, 512, 512, BatchNorm2d),
            EncModule(512, nclass=ncls, ncodes=32, se_loss=True))
Example 6
 def __init__(self, nclasses, num_branches):
     super(CombineBlock, self).__init__()
     # 32 --> 24
     self.conv1 = nn.Sequential(nn.Conv2d(nclasses*num_branches, 24, kernel_size=1, padding=0, stride=1, bias=False),
                                InPlaceABNSync(24),
                                nn.Conv2d(24, 24, kernel_size=1, padding=0, stride=1, bias=False),
                                InPlaceABNSync(24),
                                nn.Conv2d(24, nclasses, kernel_size=1, padding=0, stride=1, bias=True))
Example 7
    def __init__(self, in_dim, out_dim, rate):
        super(ContextContrastedModule, self).__init__()
        self.conv_in = nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=1, bias=False), InPlaceABNSync(out_dim))
        self.conv_local = nn.Sequential(nn.Conv2d(out_dim, out_dim, kernel_size=3, padding=1, bias=False),
                                        InPlaceABNSync(out_dim))
        self.conv_context = nn.Sequential(
            nn.Conv2d(out_dim, out_dim, kernel_size=3, dilation=rate, padding=rate, bias=False),
            InPlaceABNSync(out_dim))

        self.conv_out = nn.Sequential(nn.Conv2d(out_dim, out_dim, kernel_size=1, bias=False), InPlaceABNSync(out_dim))
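A sketch of the forward this module suggests, assuming the "context contrasted local" design where the dilated context response is subtracted from the local one:

def context_contrasted_forward(module, x):
    x = module.conv_in(x)
    local_feat = module.conv_local(x)      # 3x3, dilation 1
    context_feat = module.conv_context(x)  # 3x3, dilation `rate`
    # Contrast: highlight activations that stand out from their surroundings.
    return module.conv_out(local_feat - context_feat)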
Example 8
    def __init__(self, block, layers, num_classes):
        self.inplanes = 128
        super(ResNet, self).__init__()
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=False)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=False)
        self.relu = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3,
                                    stride=2,
                                    padding=1,
                                    ceil_mode=True)  # ceil_mode rounds the output size up
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=1,
                                       dilation=2)
        self.layer4 = self._make_layer(
            block, 512, layers[3], stride=1, dilation=4,
            multi_grid=(1, 1, 1))  # we do not apply multi-grid method here

        # extra added layers
        self.context = nn.Sequential(
            nn.Conv2d(2048, 512, kernel_size=3, stride=1, padding=1),
            InPlaceABNSync(512),
            BaseOC_Module(in_channels=512,
                          out_channels=512,
                          key_channels=256,
                          value_channels=256,
                          dropout=0.05,
                          sizes=([1])))
        self.cls = nn.Conv2d(512,
                             num_classes,
                             kernel_size=1,
                             stride=1,
                             padding=0,
                             bias=True)
        self.dsn = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
            InPlaceABNSync(512), nn.Dropout2d(0.05),
            nn.Conv2d(512,
                      num_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
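A sketch of the forward pass this backbone implies, following the common deep-supervision pattern (the dsn tap on layer3 is an assumption, since forward is not shown). The stem plus layer2 give output stride 8; layer3/4 keep resolution via dilation.

def resnet_forward(module, x):
    x = module.relu1(module.bn1(module.conv1(x)))  # stride 2
    x = module.relu2(module.bn2(module.conv2(x)))
    x = module.relu3(module.bn3(module.conv3(x)))
    x = module.maxpool(x)                          # stride 2 -> output stride 4
    x = module.layer1(x)
    x = module.layer2(x)                           # stride 2 -> output stride 8
    x = module.layer3(x)                           # dilation 2, stride 1
    x_dsn = module.dsn(x)                          # auxiliary prediction
    x = module.layer4(x)                           # dilation 4, stride 1
    return [module.cls(module.context(x)), x_dsn]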
Example 9
    def __init__(self, in_dim, out_dim):
        super(ASPPModule, self).__init__()

        self.dilation_0 = nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=3, padding=1, dilation=1, bias=False),
                                        InPlaceABNSync(out_dim))
        self.dilation_1 = nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=3, padding=2, dilation=2, bias=False),
                                        InPlaceABNSync(out_dim))
        self.dilation_2 = nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=3, padding=4, dilation=4, bias=False),
                                        InPlaceABNSync(out_dim))
        self.dilation_3 = nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=3, padding=8, dilation=8, bias=False),
                                        InPlaceABNSync(out_dim))
        self.head_conv = nn.Sequential(nn.Conv2d(out_dim * 4, out_dim, kernel_size=1, padding=0, bias=False),
                                       InPlaceABNSync(out_dim))
Example 10
 def __init__(self, input_size, input_dim, hidden_dim, kernel_size, bias,
              dtype):
     """
     Initialize the ConvGRU cell
     :param input_size: (int, int)
         Height and width of input tensor as (height, width).
     :param input_dim: int
         Number of channels of input tensor.
     :param hidden_dim: int
         Number of channels of hidden state.
     :param kernel_size: (int, int)
         Size of the convolutional kernel.
     :param bias: bool
         Whether or not to add the bias.
     :param dtype: torch.cuda.FloatTensor or torch.FloatTensor
         Whether or not to use cuda.
     """
     super(ConvGRUCell, self).__init__()
     self.height, self.width = input_size
     self.padding = kernel_size[0] // 2, kernel_size[1] // 2
     self.hidden_dim = hidden_dim
     self.bias = bias
     self.dtype = dtype
     self.conv_gates = nn.Conv2d(input_dim + hidden_dim,
                                 2,
                                 kernel_size=1,
                                 padding=0,
                                 stride=1,
                                 bias=True)
     self.conv_can = nn.Sequential(
         nn.Conv2d(input_dim + hidden_dim,
                   hidden_dim,
                   kernel_size=kernel_size,
                   padding=self.padding,
                   stride=1,
                   bias=self.bias), InPlaceABNSync(hidden_dim),
         nn.Conv2d(hidden_dim,
                   hidden_dim,
                   kernel_size=1,
                   padding=0,
                   stride=1,
                   bias=False), InPlaceABNSync(hidden_dim))
     # self.conv_can = nn.Sequential(
     #     nn.Conv2d(input_dim + hidden_dim, 2*hidden_dim, kernel_size=kernel_size, padding=self.padding, stride=1, bias=False),
     #     InPlaceABNSync(2*hidden_dim),
     #     nn.Conv2d(2*hidden_dim, hidden_dim, kernel_size=kernel_size, padding=self.padding, stride=1, bias=True),
     #     BatchNorm2d(hidden_dim)
     # )
     nn.init.orthogonal_(self.conv_gates.weight)
     nn.init.constant_(self.conv_gates.bias, 0.)
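A sketch of a standard ConvGRU step over this cell, with two caveats: conv_gates emits only two channels here, so the reset/update gates appear to be single-channel spatial maps broadcast over the hidden channels, and conv_can already ends in an activation, so no extra tanh is applied (the canonical GRU would use one).

import torch

def conv_gru_step(cell, x, h_prev):
    combined = torch.cat([x, h_prev], dim=1)
    reset_gate, update_gate = torch.sigmoid(cell.conv_gates(combined)).chunk(2, dim=1)
    candidate = cell.conv_can(torch.cat([x, reset_gate * h_prev], dim=1))
    # Convex blend of the previous state and the candidate state.
    return (1 - update_gate) * h_prev + update_gate * candidate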
Example 11
    def __init__(self, in_dim, out_dim, d_rate=[12, 24, 36]):
        super(DecoderHead, self).__init__()
        self.b0 = nn.Sequential(
            nn.Conv2d(in_dim, out_dim, kernel_size=1, bias=False),
            BatchNorm2d(out_dim), nn.ReLU(inplace=False))

        self.b1 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=3,
                      padding=d_rate[0],
                      dilation=d_rate[0],
                      bias=False), BatchNorm2d(out_dim),
            nn.ReLU(inplace=False))
        self.b2 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=3,
                      padding=d_rate[1],
                      dilation=d_rate[1],
                      bias=False), BatchNorm2d(out_dim),
            nn.ReLU(inplace=False))
        self.b3 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=3,
                      padding=d_rate[2],
                      dilation=d_rate[2],
                      bias=False), BatchNorm2d(out_dim),
            nn.ReLU(inplace=False))
        self.b4 = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, bias=False),
            BatchNorm2d(out_dim), nn.ReLU(inplace=False))

        self.project = nn.Sequential(
            nn.Conv2d(out_dim * 5,
                      out_dim,
                      kernel_size=1,
                      padding=0,
                      bias=False), InPlaceABNSync(out_dim),
            nn.Conv2d(out_dim,
                      out_dim,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False), InPlaceABNSync(out_dim))
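A minimal sketch of the ASPP-style forward this head implies: the image-level branch b4 pools to 1x1, so it must be upsampled before concatenation (interpolation settings are assumptions).

import torch
import torch.nn.functional as F

def decoder_head_forward(module, x):
    h, w = x.size()[2:]
    gp = F.interpolate(module.b4(x), size=(h, w), mode='bilinear',
                       align_corners=True)
    feats = [module.b0(x), module.b1(x), module.b2(x), module.b3(x), gp]
    return module.project(torch.cat(feats, dim=1))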
Example 12
 def __init__(self, hbody_cls, num_classes):
     super(GamaPartDecoder, self).__init__()
     self.gate = GatingBlock(in_dim=hbody_cls, out_dim=hbody_cls, force_hard=True)
     self.conv1 = nn.Sequential(nn.Conv2d(hbody_cls, 32, kernel_size=1, padding=0, stride=1, bias=False),
                                nn.ReLU(inplace=False),
                                nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1, bias=False),
                                InPlaceABNSync(32),
                                nn.Conv2d(32, num_classes, kernel_size=1, padding=0, stride=1, bias=True))
Example 13
 def __init__(self, num_classes, hbody_cls):
     super(BetaHBDecoder, self).__init__()
     self.gate = GatingBlock(in_dim=num_classes, out_dim=num_classes, force_hard=True)
     self.conv1 = nn.Sequential(nn.Conv2d(num_classes, 24, kernel_size=1, padding=0, stride=1, bias=False),
                                nn.ReLU(inplace=False),
                                nn.Conv2d(24, 24, kernel_size=1, padding=0, stride=1, bias=False),
                                InPlaceABNSync(24),
                                nn.Conv2d(24, hbody_cls, kernel_size=1, padding=0, stride=1, bias=True))
Example 14
 def __init__(self, in_channels, out_channels, key_channels, value_channels, dropout, sizes=([1])):
     super(BaseOC_Context_Module, self).__init__()
     self.stages = nn.ModuleList([self._make_stage(in_channels, out_channels, key_channels, value_channels, size) for size in sizes])
     self.conv_bn_dropout = nn.Sequential(
         nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0),
         InPlaceABNSync(out_channels),
         )
Example 15
    def __init__(self, in_dim, out_dim, scale):
        super(MagicModule, self).__init__()
        self.atte_branch = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=3,
                      padding=1,
                      dilation=1,
                      bias=True), InPlaceABNSync(out_dim),
            SelfAttentionModule(in_dim=out_dim,
                                out_dim=out_dim,
                                key_dim=out_dim // 2,
                                value_dim=out_dim,
                                scale=scale))
        # added
        self.dilation_x = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_dim),
            SEModule(out_dim, reduction=16))

        self.dilation_0 = nn.Sequential(
            ContextContrastedModule(in_dim, out_dim, rate=6),
            SEModule(out_dim, reduction=16))

        self.dilation_1 = nn.Sequential(
            ContextContrastedModule(in_dim, out_dim, rate=12),
            SEModule(out_dim, reduction=16))

        self.dilation_2 = nn.Sequential(
            ContextContrastedModule(in_dim, out_dim, rate=18),
            SEModule(out_dim, reduction=16))

        self.dilation_3 = nn.Sequential(
            ContextContrastedModule(in_dim, out_dim, rate=24),
            SEModule(out_dim, reduction=16))

        self.head_conv = nn.Sequential(
            nn.Conv2d(out_dim * 6,
                      out_dim,
                      kernel_size=1,
                      padding=0,
                      bias=False), InPlaceABNSync(out_dim))
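head_conv expects out_dim * 6 channels, i.e. all six branches concatenated; a minimal sketch of that fusion (the forward method is not shown):

import torch

def magic_forward(module, x):
    feats = [module.atte_branch(x), module.dilation_x(x), module.dilation_0(x),
             module.dilation_1(x), module.dilation_2(x), module.dilation_3(x)]
    return module.head_conv(torch.cat(feats, dim=1))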
Example 16
    def __init__(self, in_dim, out_dim, key_dim, value_dim):
        super(SA_upsample, self).__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.key_dim = key_dim
        self.value_dim = value_dim
        self.func_key = nn.Sequential(nn.Conv2d(in_channels=self.in_dim, out_channels=self.key_dim,
                                                kernel_size=1, stride=1, padding=0, bias=False),
                                      InPlaceABNSync(self.key_dim))
        self.func_query = self.func_key  # query and key share one projection (tied weights)
        self.func_value = nn.Conv2d(in_channels=self.in_dim, out_channels=self.value_dim,
                                    kernel_size=1, stride=1, padding=0)

        self.refine = nn.Sequential(nn.Conv2d(self.value_dim, self.out_dim, kernel_size=1, padding=0, bias=False),
                                    InPlaceABNSync(out_dim))

        self.alpha = nn.Parameter(torch.ones(1))
        self.beta = nn.Parameter(torch.ones(1))
Example 17
    def __init__(self, in_dim, out_dim):
        super(ASPPModule, self).__init__()

        self.gap = nn.Sequential(nn.AdaptiveAvgPool2d(1),
                                 nn.Conv2d(in_dim, out_dim, 1, bias=False), InPlaceABNSync(out_dim))

        self.dilation_0 = nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, dilation=1, bias=False),
                                        InPlaceABNSync(out_dim))

        self.dilation_1 = nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, dilation=1, bias=False),
                                        InPlaceABNSync(out_dim),
                                        nn.Conv2d(out_dim, out_dim, kernel_size=3, padding=6, dilation=6, bias=False),
                                        InPlaceABNSync(out_dim))

        self.dilation_2 = nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, dilation=1, bias=False),
                                        InPlaceABNSync(out_dim),
                                        nn.Conv2d(out_dim, out_dim, kernel_size=3, padding=12, dilation=12, bias=False),
                                        InPlaceABNSync(out_dim))

        self.dilation_3 = nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, dilation=1, bias=False),
                                        InPlaceABNSync(out_dim),
                                        nn.Conv2d(out_dim, out_dim, kernel_size=3, padding=18, dilation=18, bias=False),
                                        InPlaceABNSync(out_dim))

        # self.dilation_4 = nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, dilation=1, bias=False),
        #                                 InPlaceABNSync(out_dim),
        #                                 nn.Conv2d(out_dim, out_dim, kernel_size=3, padding=24, dilation=24, bias=False),
        #                                 InPlaceABNSync(out_dim))

        # self.psaa_conv = nn.Sequential(nn.Conv2d(in_dim + 6 * out_dim, out_dim, 1, padding=0, bias=False),
        #                                 InPlaceABNSync(out_dim),
        #                                 nn.Conv2d(out_dim, 6, 1, bias=True))

        # self.head_conv = nn.Sequential(nn.Conv2d(out_dim * 6, out_dim, kernel_size=1, padding=0, bias=False),
        #                                InPlaceABNSync(out_dim))
        self.psaa_conv = nn.Sequential(nn.Conv2d(in_dim + 5 * out_dim, out_dim, 1, padding=0, bias=False),
                                        InPlaceABNSync(out_dim),
                                        nn.Conv2d(out_dim, 5, 1, bias=True))

        self.head_conv = nn.Sequential(nn.Conv2d(out_dim * 5, out_dim, kernel_size=1, padding=0, bias=False),
                                       InPlaceABNSync(out_dim))
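A sketch of the pixel-wise attention aggregation the psaa_conv suggests: it sees the input plus all five branch outputs and predicts one weight map per branch. The softmax normalization is an assumption, since none is built into this module (Example 23 below uses an explicit Sigmoid instead).

import torch
import torch.nn.functional as F

def aspp_psaa_forward(module, x):
    h, w = x.size()[2:]
    gp = F.interpolate(module.gap(x), size=(h, w), mode='bilinear',
                       align_corners=True)
    feats = [gp, module.dilation_0(x), module.dilation_1(x),
             module.dilation_2(x), module.dilation_3(x)]
    atten = torch.softmax(module.psaa_conv(torch.cat([x] + feats, dim=1)), dim=1)
    weighted = [atten[:, i:i + 1] * f for i, f in enumerate(feats)]
    return module.head_conv(torch.cat(weighted, dim=1))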
Example 18
    def __init__(self, in_dim, out_dim, scale):
        super(SEOCModule, self).__init__()
        self.atte_branch = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=3,
                      padding=1,
                      dilation=1,
                      bias=True), InPlaceABNSync(out_dim),
            SelfAttentionModule(in_dim=out_dim,
                                out_dim=out_dim,
                                key_dim=out_dim // 2,
                                value_dim=out_dim,
                                scale=scale))

        self.dilation_0 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_dim),
            SEModule(out_dim, reduction=16))

        self.dilation_1 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=3,
                      padding=12,
                      dilation=12,
                      bias=False), InPlaceABNSync(out_dim),
            SEModule(out_dim, reduction=16))

        self.dilation_2 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=3,
                      padding=24,
                      dilation=24,
                      bias=False), InPlaceABNSync(out_dim),
            SEModule(out_dim, reduction=16))

        self.dilation_3 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=3,
                      padding=36,
                      dilation=36,
                      bias=False), InPlaceABNSync(out_dim),
            SEModule(out_dim, reduction=16))

        self.head_conv = nn.Sequential(
            nn.Conv2d(out_dim * 5,
                      out_dim,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=False), InPlaceABNSync(out_dim))
Example 19
 def __init__(self, in_channels, nclass, ncodes=32, se_loss=True):
     super(EncModule, self).__init__()
     self.se_loss = se_loss
     self.encoding = nn.Sequential(
         nn.Conv2d(in_channels, in_channels, 1, bias=False),
         InPlaceABNSync(in_channels),
         encoding.nn.Encoding(D=in_channels, K=ncodes),
         # encoding.nn.BatchNorm1d(ncodes),
         nn.BatchNorm1d(ncodes),
         nn.ReLU(inplace=False),
         encoding.nn.Mean(dim=1))
     self.fc = nn.Sequential(nn.Linear(in_channels, in_channels),
                             nn.Sigmoid())
     if self.se_loss:
         self.selayer = nn.Linear(in_channels, nclass)
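A sketch following the EncNet pattern this module matches: encode global statistics, predict a channel-wise scaling, and optionally emit per-image class-presence logits for the se-loss.

import torch.nn.functional as F

def enc_forward(module, x):
    en = module.encoding(x)                  # N x C encoded statistics
    b, c, _, _ = x.size()
    gamma = module.fc(en).view(b, c, 1, 1)   # sigmoid gate per channel
    outputs = [F.relu_(x + x * gamma)]       # feature re-calibration
    if module.se_loss:
        outputs.append(module.selayer(en))   # class-presence logits
    return tuple(outputs)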
Example 20
    def __init__(self, in_dim, out_dim, ncls, scale):
        super(ASPOCencModule, self).__init__()

        self.atte_branch = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=3,
                      padding=1,
                      dilation=1,
                      bias=True), InPlaceABNSync(out_dim),
            SelfAttentionModule(in_dim=out_dim,
                                out_dim=out_dim,
                                key_dim=out_dim // 2,
                                value_dim=out_dim,
                                scale=scale))

        self.dilation_0 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_dim))

        self.dilation_1 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=3,
                      padding=12,
                      dilation=12,
                      bias=False), InPlaceABNSync(out_dim))

        self.dilation_2 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=3,
                      padding=24,
                      dilation=24,
                      bias=False), InPlaceABNSync(out_dim))

        self.dilation_3 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=3,
                      padding=36,
                      dilation=36,
                      bias=False), InPlaceABNSync(out_dim))

        self.head_conv = nn.Sequential(
            nn.Conv2d(out_dim * 5, 512, kernel_size=1, padding=0, bias=False),
            InPlaceABNSync(512),
            EncModule(512, nclass=ncls, ncodes=32, se_loss=True))
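A sketch of the implied fusion: head_conv expects out_dim * 5 channels, and it ends in EncModule, so with se_loss=True the call returns a (features, se-logits) tuple.

import torch

def aspoc_enc_forward(module, x):
    feats = torch.cat([module.atte_branch(x), module.dilation_0(x),
                       module.dilation_1(x), module.dilation_2(x),
                       module.dilation_3(x)], dim=1)  # out_dim * 5 channels
    return module.head_conv(feats)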
Example 21
 def __init__(self, in_channels, key_channels, value_channels, out_channels=None, scale=1):
     super(_SelfAttentionBlock, self).__init__()
     self.scale = scale
     self.in_channels = in_channels
     self.out_channels = out_channels
     self.key_channels = key_channels
     self.value_channels = value_channels
     if out_channels is None:
         self.out_channels = in_channels
     self.pool = nn.MaxPool2d(kernel_size=(scale, scale))
     self.f_key = nn.Sequential(
         nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels,
             kernel_size=1, stride=1, padding=0),
         InPlaceABNSync(self.key_channels),
     )
     self.f_query = self.f_key  # query and key share one projection (tied weights)
     self.f_value = nn.Conv2d(in_channels=self.in_channels, out_channels=self.value_channels,
         kernel_size=1, stride=1, padding=0)
     self.W = nn.Conv2d(in_channels=self.value_channels, out_channels=self.out_channels,
         kernel_size=1, stride=1, padding=0)
     nn.init.constant_(self.W.weight, 0)
     nn.init.constant_(self.W.bias, 0)
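With W initialized to zero, the block's output starts at zero; presumably it is used residually, so training begins from the identity mapping. A hypothetical usage sketch (block and x are placeholders):

context = block(x)   # zero at initialization, since W is zero-initialized
out = x + context    # identity mapping at the start of training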
Example 22
    def __init__(self, in_dim, out_dim):
        super(ASPPModule2, self).__init__()

        self.gap = nn.Sequential(nn.AdaptiveAvgPool2d(1),
                                 nn.Conv2d(in_dim, out_dim, 1, bias=False),
                                 InPlaceABNSync(out_dim))

        self.dilation_0 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_dim),
            SEModule(out_dim, reduction=16))

        self.dilation_1 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_dim),
            nn.Conv2d(out_dim,
                      out_dim,
                      kernel_size=3,
                      padding=6,
                      dilation=6,
                      bias=False), InPlaceABNSync(out_dim),
            SEModule(out_dim, reduction=16))

        self.dilation_2 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_dim),
            nn.Conv2d(out_dim,
                      out_dim,
                      kernel_size=3,
                      padding=12,
                      dilation=12,
                      bias=False), InPlaceABNSync(out_dim),
            SEModule(out_dim, reduction=16))

        self.dilation_3 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_dim),
            nn.Conv2d(out_dim,
                      out_dim,
                      kernel_size=3,
                      padding=18,
                      dilation=18,
                      bias=False), InPlaceABNSync(out_dim),
            SEModule(out_dim, reduction=16))

        self.project = nn.Sequential(
            nn.Conv2d(out_dim * 5,
                      out_dim,
                      kernel_size=1,
                      padding=0,
                      bias=False), InPlaceABNSync(out_dim))
        self.pam0 = PAM_Module(in_dim=out_dim,
                               key_dim=out_dim // 8,
                               value_dim=out_dim,
                               out_dim=out_dim)
Example 23
    def __init__(self, in_dim, out_dim, scale=1):
        super(ASPPModule, self).__init__()
        self.gap = nn.Sequential(nn.AdaptiveAvgPool2d(1),
                                 nn.Conv2d(in_dim, out_dim, 1, bias=False),
                                 InPlaceABNSync(out_dim))

        self.dilation_0 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_dim),
            SEModule(out_dim, reduction=16))

        self.dilation_1 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_dim),
            nn.Conv2d(out_dim,
                      out_dim,
                      kernel_size=3,
                      padding=6,
                      dilation=6,
                      bias=False), InPlaceABNSync(out_dim),
            SEModule(out_dim, reduction=16))

        self.dilation_2 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_dim),
            nn.Conv2d(out_dim,
                      out_dim,
                      kernel_size=3,
                      padding=12,
                      dilation=12,
                      bias=False), InPlaceABNSync(out_dim),
            SEModule(out_dim, reduction=16))

        self.dilation_3 = nn.Sequential(
            nn.Conv2d(in_dim,
                      out_dim,
                      kernel_size=1,
                      padding=0,
                      dilation=1,
                      bias=False), InPlaceABNSync(out_dim),
            nn.Conv2d(out_dim,
                      out_dim,
                      kernel_size=3,
                      padding=18,
                      dilation=18,
                      bias=False), InPlaceABNSync(out_dim),
            SEModule(out_dim, reduction=16))

        self.psaa_conv = nn.Sequential(
            nn.Conv2d(in_dim + 5 * out_dim, out_dim, 1, padding=0, bias=False),
            InPlaceABNSync(out_dim), nn.Conv2d(out_dim, 5, 1, bias=True),
            nn.Sigmoid())

        self.project = nn.Sequential(
            nn.Conv2d(out_dim * 5,
                      out_dim,
                      kernel_size=1,
                      padding=0,
                      bias=False), InPlaceABNSync(out_dim))