Example #1
def __init__(
     self,
     in_channels,
     skip_channels,
     out_channels,
     use_batchnorm=True,
     attention_type=None,
 ):
     super().__init__()
     self.conv1 = md.Conv2dReLU(
         in_channels + skip_channels,
         out_channels,
         kernel_size=3,
         padding=1,
         use_batchnorm=use_batchnorm,
     )
     self.attention1 = md.Attention(attention_type,
                                    in_channels=in_channels + skip_channels)
     self.conv2 = md.Conv2dReLU(
         out_channels,
         out_channels,
         kernel_size=3,
         padding=1,
         use_batchnorm=use_batchnorm,
     )
     self.attention2 = md.Attention(attention_type,
                                    in_channels=out_channels)
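The snippet above only shows the constructor; a minimal forward sketch that would match it is given below. This is an assumption following the common upsample -> concat -> attend -> convolve decoder pattern, not code taken from the source, and it assumes `import torch` and `import torch.nn.functional as F`.

def forward(self, x, skip=None):
    x = F.interpolate(x, scale_factor=2, mode="nearest")  # upsample the high-level features
    if skip is not None:
        x = torch.cat([x, skip], dim=1)                   # in_channels + skip_channels
        x = self.attention1(x)
    x = self.conv1(x)
    x = self.conv2(x)
    return self.attention2(x)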
Example #2
    def __init__(self, in_channels, out_channels):
        super(GatedContextDecoder, self).__init__()

        self.layer_channels = np.sum(in_channels[:-1])
        self.gate_channels = in_channels[-1]

        in_channels = np.sum(in_channels)

        self.attention = GatedContextAttention(
                                layer_channels=self.layer_channels, 
                                gate_channels=self.gate_channels, 
                                inter_channels=self.gate_channels // 2
                        )
        # self.conv = ConvBnReLu(in_channels, out_channels)
        # self.conv = RRCNN_block(in_channels, out_channels)
        self.conv1 = md.Conv2dReLU(
            in_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=True,
        )
        self.conv2 = md.Conv2dReLU(
            out_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=True,
        )
        # nn.init.kaiming_normal_(self.conv1[0].weight, mode='fan_in', nonlinearity='relu')
        # nn.init.kaiming_normal_(self.conv2[0].weight, mode='fan_in', nonlinearity='relu')

        self.attention2 = Attention('scse', in_channels=out_channels)
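A hedged usage sketch of the block above; all channel values are assumptions, chosen only to illustrate that in_channels is a sequence whose last entry acts as the gating map.

# Hypothetical instantiation (values assumed): the last entry of in_channels is the
# gating signal, the remaining entries are the layer features that get gated.
decoder = GatedContextDecoder(in_channels=[64, 128, 256, 512], out_channels=256)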
Example #3
    def __init__(
            self,
            in_channels,
            out_channels,
    ):
        super().__init__()

        in_channels = np.sum(in_channels)

        self.conv1 = md.Conv2dReLU(
            in_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=True,
        )
        self.attention1 = Attention('scse', in_channels=in_channels)
        self.conv2 = md.Conv2dReLU(
            out_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=True,
        )
        self.attention2 = Attention('scse', in_channels=out_channels)
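One plausible forward for this block, inferred from the use of np.sum(in_channels); it is an assumption, not source code, and assumes `import torch`.

def forward(self, *features):
    # Concatenate the incoming maps (channel count equals np.sum(in_channels)),
    # re-weight them with scSE attention, then refine with the two 3x3 convolutions.
    x = torch.cat(features, dim=1)
    x = self.attention1(x)
    x = self.conv1(x)
    x = self.conv2(x)
    return self.attention2(x)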
Example #4
 def __init__(self,
              in_channels,
              skip_channels,
              out_channels,
              use_batchnorm=True,
              reduction=16):
     # MFAB is essentially a pair of modified SE blocks: one gate for the skip features, one for the high-level input
     super(MFAB, self).__init__()
     self.hl_conv = nn.Sequential(
         md.Conv2dReLU(
             in_channels,
             in_channels,
             kernel_size=3,
             padding=1,
             use_batchnorm=use_batchnorm,
         ),
         md.Conv2dReLU(
             in_channels,
             skip_channels,
             kernel_size=1,
             use_batchnorm=use_batchnorm,
         ))
     reduced_channels = max(1, skip_channels // reduction)
     self.SE_ll = nn.Sequential(
         nn.AdaptiveAvgPool2d(1),
         nn.Conv2d(skip_channels, reduced_channels, 1),
         nn.ReLU(inplace=True),
         nn.Conv2d(reduced_channels, skip_channels, 1),
         nn.Sigmoid(),
     )
     self.SE_hl = nn.Sequential(
         nn.AdaptiveAvgPool2d(1),
         nn.Conv2d(skip_channels, reduced_channels, 1),
         nn.ReLU(inplace=True),
         nn.Conv2d(reduced_channels, skip_channels, 1),
         nn.Sigmoid(),
     )
     self.conv1 = md.Conv2dReLU(
         skip_channels +
         skip_channels,  # we transform C-prime from high level to C from skip connection
         out_channels,
         kernel_size=3,
         padding=1,
         use_batchnorm=use_batchnorm,
     )
     self.conv2 = md.Conv2dReLU(
         out_channels,
         out_channels,
         kernel_size=3,
         padding=1,
         use_batchnorm=use_batchnorm,
     )
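A plausible forward for MFAB, modeled on the usual MA-Net wiring; this is an assumption, not the source, and it assumes `import torch` and `import torch.nn.functional as F`.

def forward(self, x, skip=None):
    x = self.hl_conv(x)                                   # compress high-level features to skip_channels
    x = F.interpolate(x, scale_factor=2, mode="nearest")  # upsample to the skip resolution
    attention_hl = self.SE_hl(x)
    if skip is not None:
        attention_ll = self.SE_ll(skip)
        attention_hl = attention_hl + attention_ll        # fuse the two SE gates
        x = x * attention_hl
        x = torch.cat([x, skip], dim=1)                   # skip_channels + skip_channels
    x = self.conv1(x)
    return self.conv2(x)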
Example #5
 def __init__(self, in_channels, out_channels, use_batchnorm=True):
     conv1 = md.Conv2dReLU(
         in_channels,
         out_channels,
         kernel_size=3,
         padding=1,
         use_batchnorm=use_batchnorm,
     )
     conv2 = md.Conv2dReLU(
         out_channels,
         out_channels,
         kernel_size=3,
         padding=1,
         use_batchnorm=use_batchnorm,
     )
     super().__init__(conv1, conv2)
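Because both convolutions are handed to super().__init__, this block presumably subclasses nn.Sequential, so calling it simply applies conv1 and then conv2. A hypothetical usage sketch (the class name and shapes are assumptions) follows.

block = DecoderBlock(in_channels=256, out_channels=128)   # hypothetical class name
out = block(torch.rand(2, 256, 64, 64))                   # -> shape (2, 128, 64, 64)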
Example #6
    def __init__(self, in_channels, out_channels, use_batchnorm=True):
        super().__init__()

        self.block = nn.Sequential(
            modules.Conv2dReLU(in_channels,
                               in_channels // 4,
                               kernel_size=1,
                               use_batchnorm=use_batchnorm),
            TransposeX2(in_channels // 4,
                        in_channels // 4,
                        use_batchnorm=use_batchnorm),
            modules.Conv2dReLU(in_channels // 4,
                               out_channels,
                               kernel_size=1,
                               use_batchnorm=use_batchnorm),
        )
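A likely forward for this LinkNet-style block; this is an assumption rather than source code.

def forward(self, x, skip=None):
    # 1x1 reduce -> transposed conv (x2 upsample) -> 1x1 expand, then add the skip if given
    x = self.block(x)
    if skip is not None:
        x = x + skip
    return x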
Example #7
 def __init__(self, in_channels, out_channels, pool_size, use_bathcnorm=True):
     super().__init__()
     if pool_size == 1:
         use_bathcnorm = False  # a 1x1 pooled map has a single spatial value per channel, so BatchNorm is meaningless there and can fail during training
     self.pool = nn.Sequential(
         nn.AdaptiveAvgPool2d(output_size=(pool_size, pool_size)),
         modules.Conv2dReLU(in_channels, out_channels, (1, 1), use_batchnorm=use_bathcnorm),
     )
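A plausible forward for this pyramid-pooling branch; it is an assumption and presumes `import torch.nn.functional as F`.

def forward(self, x):
    # Pool down to pool_size x pool_size, project with the 1x1 conv, then upsample back
    # to the input resolution so the pyramid levels can be concatenated later.
    h, w = x.size(2), x.size(3)
    x = self.pool(x)
    return F.interpolate(x, size=(h, w), mode="bilinear", align_corners=True)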
Example #8
 def get_smooth_layers(self, in_channels, mid_channels):
     blocks = []
     for in_channel in in_channels:
         blocks.append(
             md.Conv2dReLU(in_channel,
                           mid_channels,
                           1,
                           use_batchnorm=self.use_batchnorm))
     return nn.ModuleList(blocks)
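A hedged usage sketch (assumed, not from the source): each lateral feature map gets its own 1x1 smoothing conv so all maps end up with mid_channels channels before being fused; `self.smooth` refers to the ModuleList returned above.

smoothed = [smooth(feat) for smooth, feat in zip(self.smooth, features)]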
Example #9
    def __init__(self, in_channels, out_channels, use_batchnorm=True):
        super(DAN_Module, self).__init__()
        inter_channels = in_channels // 4

        self.pconv = md.Conv2dReLU(in_channels,
                                   inter_channels,
                                   kernel_size=3,
                                   padding=1,
                                   use_batchnorm=use_batchnorm)
        self.cconv = md.Conv2dReLU(in_channels,
                                   inter_channels,
                                   kernel_size=3,
                                   padding=1,
                                   use_batchnorm=use_batchnorm)
        self.sp = PAM_Module(inter_channels)
        self.sc = CAM_Module(inter_channels)
        self.fusion = nn.Conv2d(in_channels=inter_channels,
                                out_channels=out_channels,
                                kernel_size=1)
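A plausible forward for this dual-attention module, mirroring DANet's head; the exact wiring is an assumption.

def forward(self, x):
    p = self.sp(self.pconv(x))   # spatial / position attention branch
    c = self.sc(self.cconv(x))   # channel attention branch
    return self.fusion(p + c)    # sum the branches, project to out_channels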
Example #10
 def __init__(self, in_dim):
     super(CAM_Module, self).__init__()
     self.chanel_in = in_dim
     self.gamma = nn.Parameter(torch.zeros(1))
     self.softmax = nn.Softmax(dim=-1)
     self.W = md.Conv2dReLU(in_dim,
                            in_dim,
                            kernel_size=3,
                            padding=1,
                            use_batchnorm=True)
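A forward sketch in the spirit of DANet's channel attention module; it assumes `import torch`, and the placement of the extra self.W conv at the end is an assumption specific to this variant.

def forward(self, x):
    b, c, h, w = x.size()
    query = x.view(b, c, -1)                        # B x C x N
    key = x.view(b, c, -1).permute(0, 2, 1)         # B x N x C
    energy = torch.bmm(query, key)                  # B x C x C channel affinities
    energy = torch.max(energy, -1, keepdim=True)[0].expand_as(energy) - energy
    attention = self.softmax(energy)
    value = x.view(b, c, -1)
    out = torch.bmm(attention, value).view(b, c, h, w)
    out = self.gamma * out + x                      # learnable residual scaling
    return self.W(out)                              # assumed use of the trailing 3x3 conv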
Example #11
    def __init__(self,
                 in_channels,
                 out_channel,
                 sizes=(1, 2, 4, 8),
                 use_bathcnorm=True):
        super().__init__()

        self.blocks = nn.ModuleList([
            FAMBlock(in_channels,
                     in_channels // len(sizes),
                     size,
                     use_bathcnorm=use_bathcnorm) for size in sizes
        ])

        self.conv = modules.Conv2dReLU(in_channels // len(sizes), out_channel,
                                       (3, 3), 1)
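A speculative forward for this aggregation block: since self.conv expects in_channels // len(sizes) inputs, the branch outputs are presumably summed rather than concatenated (assuming each FAMBlock returns a map at the input resolution).

def forward(self, x):
    out = sum(block(x) for block in self.blocks)   # each branch emits in_channels // len(sizes) channels
    return self.conv(out)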
Example #12
    def __init__(self, in_dim):
        super(PAM_Module, self).__init__()
        self.chanel_in = in_dim

        self.query_conv = nn.Conv2d(in_channels=in_dim,
                                    out_channels=in_dim // 8,
                                    kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_dim,
                                  out_channels=in_dim // 8,
                                  kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=in_dim,
                                    out_channels=in_dim,
                                    kernel_size=1)
        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)
        self.W = md.Conv2dReLU(in_dim,
                               in_dim,
                               kernel_size=3,
                               padding=1,
                               use_batchnorm=True)
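A forward sketch in the spirit of DANet's position attention module; it assumes `import torch`, and the final self.W conv is specific to this variant, so its placement is an assumption.

def forward(self, x):
    b, c, h, w = x.size()
    query = self.query_conv(x).view(b, -1, h * w).permute(0, 2, 1)   # B x N x C//8
    key = self.key_conv(x).view(b, -1, h * w)                        # B x C//8 x N
    attention = self.softmax(torch.bmm(query, key))                  # B x N x N pixel affinities
    value = self.value_conv(x).view(b, -1, h * w)                    # B x C x N
    out = torch.bmm(value, attention.permute(0, 2, 1)).view(b, c, h, w)
    out = self.gamma * out + x                                       # learnable residual scaling
    return self.W(out)                                               # assumed trailing 3x3 conv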
Example #13
 def __init__(self,
              in_channels,
              mid_channels,
              out_channels,
              use_batchnorm=True):
     super(UnaryHead, self).__init__()
     self.use_batchnorm = use_batchnorm
     self.smooth = self.get_smooth_layers(in_channels, mid_channels)
     self.conv1 = md.Conv2dReLU(len(in_channels) * mid_channels,
                                mid_channels,
                                3,
                                1,
                                use_batchnorm=self.use_batchnorm)
     self.unary = nn.Sequential(
         nn.Conv2d(mid_channels, out_channels, 3, 1, 1),
         nn.Sigmoid(),
     )
     self.conv2 = nn.Conv2d(mid_channels, mid_channels, 1, 1)
     self.pooling = nn.AdaptiveAvgPool2d((1, 1))
     self.fr = nn.Sequential(
         nn.Linear(mid_channels, out_channels, bias=True),
         nn.Sigmoid(),
     )
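A hypothetical instantiation of the head above; the channel values are assumptions, shown only because conv1 expects len(in_channels) * mid_channels inputs, which implies in_channels is a sequence of encoder feature widths.

head = UnaryHead(in_channels=[64, 128, 256, 512], mid_channels=128, out_channels=1)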
Example #14
    def __init__(
        self,
        encoder_channels,
        use_batchnorm=True,
        out_channels=512,
        dropout=0.2,
    ):
        super().__init__()

        self.psp = PSPModule(
            in_channels=encoder_channels[-1],
            sizes=(1, 2, 3, 6),
            use_bathcnorm=use_batchnorm,
        )

        self.conv = modules.Conv2dReLU(
            in_channels=encoder_channels[-1] * 2,
            out_channels=out_channels,
            kernel_size=1,
            use_batchnorm=use_batchnorm,
        )

        self.dropout = nn.Dropout2d(p=dropout)
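A minimal forward sketch for this decoder head (an assumption, not from the source): only the deepest encoder map is used, and it passes through pyramid pooling, the 1x1 fusion conv, and dropout.

def forward(self, *features):
    x = features[-1]        # deepest encoder feature map, encoder_channels[-1] channels
    x = self.psp(x)         # pyramid pooling doubles the channel count
    x = self.conv(x)
    return self.dropout(x)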