Example #1
    def __init__(self,
                 in_chans,
                 out_chans,
                 use_att=True,
                 reduction=16,
                 use_gap=True,
                 use_gmp=True):
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans

        self.layers = nn.Sequential(
            nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1,
                      bias=True), nn.ReLU(),
            nn.Conv2d(out_chans,
                      out_chans,
                      kernel_size=3,
                      padding=1,
                      bias=False), nn.BatchNorm2d(num_features=out_chans),
            nn.ReLU())
        self.use_att = use_att
        self.att = ChannelAttention(num_chans=out_chans,
                                    reduction=reduction,
                                    use_gap=use_gap,
                                    use_gmp=use_gmp)
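All of the snippets on this page instantiate a ChannelAttention module without showing its definition. For reference, here is a minimal squeeze-and-excitation style sketch of what such a module could look like, assuming that use_gap and use_gmp select global average pooling and global max pooling whose scores are summed before a sigmoid gate; the actual implementations behind these examples may differ.

from torch import nn


class ChannelAttention(nn.Module):
    """Hypothetical channel attention: gates each channel with a scalar in (0, 1)."""
    def __init__(self, num_chans, reduction=16, use_gap=True, use_gmp=True):
        super().__init__()
        assert use_gap or use_gmp, 'At least one pooling type must be enabled.'
        self.use_gap = use_gap  # Global Average Pooling.
        self.use_gmp = use_gmp  # Global Max Pooling.
        self.gap = nn.AdaptiveAvgPool2d(1)
        self.gmp = nn.AdaptiveMaxPool2d(1)
        self.mlp = nn.Sequential(
            nn.Linear(num_chans, num_chans // reduction),
            nn.ReLU(),
            nn.Linear(num_chans // reduction, num_chans),
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, tensor):
        batch, chans, _, _ = tensor.shape
        scores = tensor.new_zeros(batch, chans)
        if self.use_gap:
            scores = scores + self.mlp(self.gap(tensor).view(batch, chans))
        if self.use_gmp:
            scores = scores + self.mlp(self.gmp(tensor).view(batch, chans))
        scale = self.sigmoid(scores).view(batch, chans, 1, 1)
        return tensor * scale
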
Example #2
    def __init__(self,
                 num_chans,
                 kernel_size=3,
                 res_scale=1.,
                 use_ca=True,
                 reduction=16,
                 use_gap=True,
                 use_gmp=True):
        super().__init__()
        assert kernel_size % 2, 'Kernel size is expected to be an odd number.'
        self.layer = nn.Sequential(
            nn.Conv2d(in_channels=num_chans,
                      out_channels=num_chans,
                      kernel_size=kernel_size,
                      padding=kernel_size // 2),
            nn.ReLU(),
            nn.Conv2d(in_channels=num_chans,
                      out_channels=num_chans,
                      kernel_size=kernel_size,
                      padding=kernel_size // 2),
        )
        self.ca = ChannelAttention(num_chans=num_chans,
                                   reduction=reduction,
                                   use_gap=use_gap,
                                   use_gmp=use_gmp)
        self.res_scale = res_scale
        self.use_ca = use_ca
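The res_scale and use_ca arguments above point to an RCAN-style residual channel-attention block, but the snippet stops at __init__. Under that assumption, a plausible (hypothetical) forward pass would be:

    def forward(self, tensor):
        # Assumed forward pass: conv stack, optional channel attention,
        # then a scaled residual connection back to the input.
        output = self.layer(tensor)
        if self.use_ca:
            output = self.ca(output)
        return tensor + output * self.res_scale
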
Example #3
    def __init__(self,
                 in_chans,
                 out_chans,
                 num_groups,
                 use_att=True,
                 reduction=16,
                 use_gap=True,
                 use_gmp=True):
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans

        self.layers = nn.Sequential(
            nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1,
                      bias=True), nn.ReLU(),
            nn.Conv2d(out_chans,
                      out_chans,
                      kernel_size=3,
                      padding=1,
                      bias=False),
            nn.GroupNorm(num_groups=num_groups, num_channels=out_chans),
            nn.ReLU())

        self.use_att = use_att if (use_gap or use_gmp) else False
        self.att = ChannelAttention(num_chans=out_chans,
                                    reduction=reduction,
                                    use_gap=use_gap,
                                    use_gmp=use_gmp)
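Here use_att is forced to False whenever both pooling options are disabled, but the snippet does not show where the flag is consumed. A plausible, purely illustrative forward pass would apply the attention only when the flag is set:

    def forward(self, tensor):
        # Assumed forward pass: the attention module is only applied
        # when use_att is enabled.
        output = self.layers(tensor)
        return self.att(output) if self.use_att else output
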
Example #4
    def __init__(self, in_chans, out_chans, stride=2,
                 use_ca=True, reduction=16, use_gap=True, use_gmp=True):
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.use_ca = use_ca

        self.layers = nn.Sequential(
            nn.Conv2d(in_chans, out_chans, kernel_size=3, stride=stride, padding=1, bias=True),
            nn.ReLU(),
            nn.Conv2d(out_chans, out_chans, kernel_size=3, stride=1, padding=1, bias=True)
        )
        self.ca = ChannelAttention(num_chans=out_chans, reduction=reduction, use_gap=use_gap, use_gmp=use_gmp)
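The first convolution above uses stride=2 by default, so the block halves the spatial resolution. A standalone shape check of that convolution (input size chosen arbitrarily for illustration):

import torch
from torch import nn

conv = nn.Conv2d(1, 32, kernel_size=3, stride=2, padding=1, bias=True)
x = torch.randn(1, 1, 320, 320)
print(conv(x).shape)  # torch.Size([1, 32, 160, 160]): height and width are halved.
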
Example #5
    def __init__(self, in_chans, out_chans, num_groups, negative_slope=0.01, stride=2,
                 use_ca=True, reduction=16, use_gap=True, use_gmp=True):
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.use_ca = use_ca

        self.layers = nn.Sequential(
            # Down-sampling happens here, via the strided convolution (stride defaults to 2).
            # Bias is kept in the conv since group-norm normalizes over a group of
            # channels rather than per channel, so the bias is not fully absorbed.
            nn.Conv2d(in_chans, out_chans, kernel_size=3, stride=stride, padding=1, bias=True),
            nn.GroupNorm(num_groups=num_groups, num_channels=out_chans),
            nn.LeakyReLU(negative_slope=negative_slope),

            # The second convolution keeps the spatial size (stride 1).
            nn.Conv2d(out_chans, out_chans, kernel_size=3, stride=1, padding=1, bias=True),
            nn.GroupNorm(num_groups=num_groups, num_channels=out_chans),
            nn.LeakyReLU(negative_slope=negative_slope)
        )
        self.ca = ChannelAttention(num_chans=out_chans, reduction=reduction, use_gap=use_gap, use_gmp=use_gmp)
    def __init__(self,
                 in_chans,
                 out_chans,
                 num_groups,
                 use_ca=True,
                 reduction=16,
                 use_gap=True,
                 use_gmp=True,
                 use_sa=True,
                 sa_kernel_size=7,
                 sa_dilation=1,
                 use_cap=True,
                 use_cmp=True):

        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans

        self.use_ca = use_ca
        self.use_sa = use_sa

        self.layers = nn.Sequential(
            nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1,
                      bias=True), nn.LeakyReLU(),
            nn.Conv2d(out_chans,
                      out_chans,
                      kernel_size=3,
                      padding=1,
                      bias=False),
            nn.GroupNorm(num_groups=num_groups, num_channels=out_chans),
            nn.LeakyReLU())

        self.ca = ChannelAttention(num_chans=out_chans,
                                   reduction=reduction,
                                   use_gap=use_gap,
                                   use_gmp=use_gmp)
        self.sa = SpatialAttention(kernel_size=sa_kernel_size,
                                   dilation=sa_dilation,
                                   use_cap=use_cap,
                                   use_cmp=use_cmp)
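The SpatialAttention module referenced here is likewise not defined on this page. Below is a minimal CBAM-style sketch, assuming that use_cap and use_cmp select channel-wise average pooling and channel-wise max pooling, and that the dilation-aware padding keeps the spatial size unchanged; the original implementation may differ.

import torch
from torch import nn


class SpatialAttention(nn.Module):
    """Hypothetical spatial attention: gates each pixel with a scalar in (0, 1)."""
    def __init__(self, kernel_size=7, dilation=1, use_cap=True, use_cmp=True):
        super().__init__()
        assert use_cap or use_cmp, 'At least one pooling type must be enabled.'
        self.use_cap = use_cap  # Channel Average Pooling.
        self.use_cmp = use_cmp  # Channel Max Pooling.
        # Padding chosen so that the attention mask keeps the input's spatial size.
        padding = dilation * (kernel_size - 1) // 2
        self.conv = nn.Conv2d(int(use_cap) + int(use_cmp), 1, kernel_size=kernel_size,
                              padding=padding, dilation=dilation, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, tensor):
        pooled = list()
        if self.use_cap:
            pooled.append(tensor.mean(dim=1, keepdim=True))
        if self.use_cmp:
            pooled.append(tensor.max(dim=1, keepdim=True).values)
        mask = self.sigmoid(self.conv(torch.cat(pooled, dim=1)))
        return tensor * mask

In CBAM, channel attention is applied before spatial attention, so the block's forward pass presumably applies self.ca and then self.sa in that order.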