Example #1
import torch.nn as nn
from segmentation_models_pytorch.base import modules as md  # assumed source of the "md" alias


class DecoderBlock(nn.Module):  # enclosing class name assumed; the excerpt shows only __init__
    def __init__(
        self,
        in_channels,
        skip_channels,
        out_channels,
        use_batchnorm=True,
        attention_type=None,
    ):
        super().__init__()
        # First 3x3 conv takes the upsampled decoder features concatenated with
        # the encoder skip, hence in_channels + skip_channels input channels.
        self.conv1 = md.Conv2dReLU(
            in_channels + skip_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
        )
        # Attention over the concatenated channels (identity when attention_type is None).
        self.attention1 = md.Attention(
            attention_type, in_channels=in_channels + skip_channels
        )
        # Second 3x3 conv refines the features at out_channels width.
        self.conv2 = md.Conv2dReLU(
            out_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
        )
        # Attention over the block output.
        self.attention2 = md.Attention(attention_type, in_channels=out_channels)
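The excerpt only registers the layers and does not show how they are wired together. Below is a minimal forward sketch, assuming the usual segmentation_models_pytorch U-Net decoder pattern (2x nearest-neighbor upsampling, skip concatenation, attention1, the two convs, then attention2); the upsampling step and the call order are assumptions, as they do not appear in the excerpt.

# forward method to pair with the constructor above
# (requires: import torch; import torch.nn.functional as F)
def forward(self, x, skip=None):
    # Upsample decoder features by 2x (assumed nearest-neighbor, as in the
    # segmentation_models_pytorch U-Net decoder).
    x = F.interpolate(x, scale_factor=2.0, mode="nearest")
    if skip is not None:
        # Concatenate the encoder skip features and attend over the
        # combined in_channels + skip_channels channels.
        x = torch.cat([x, skip], dim=1)
        x = self.attention1(x)
    x = self.conv1(x)
    x = self.conv2(x)
    x = self.attention2(x)
    return x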
Example #2
import torch.nn as nn
from segmentation_models_pytorch.base import modules as md  # assumed source of the "md" alias
# Conv2dAct is assumed to be a user-defined variant of md.Conv2dReLU with a
# configurable activation (see the sketch after this example).


class DecoderBlock(nn.Module):  # enclosing class name assumed; the excerpt shows only __init__
    def __init__(
        self,
        in_channels,
        skip_channels,
        out_channels,
        use_batchnorm=True,
        act_layer='relu',
        attention_type=None,
    ):
        super().__init__()
        # Same structure as Example #1, but the activation is selectable via
        # act_layer instead of being fixed to ReLU.
        self.conv1 = Conv2dAct(
            in_channels + skip_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
            act_layer=act_layer,
        )
        self.attention1 = md.Attention(
            attention_type, in_channels=in_channels + skip_channels
        )
        self.conv2 = Conv2dAct(
            out_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
            act_layer=act_layer,
        )
        self.attention2 = md.Attention(attention_type, in_channels=out_channels)
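Conv2dAct is not part of segmentation_models_pytorch; it is the user-defined replacement for md.Conv2dReLU that makes the activation configurable. A minimal sketch of what such a module could look like is shown below; the supported act_layer names ('relu', 'gelu', 'silu'), the defaults, and the Conv-BatchNorm-activation ordering are assumptions modeled on md.Conv2dReLU.

import torch.nn as nn

class Conv2dAct(nn.Sequential):
    """Conv2d -> optional BatchNorm -> selectable activation.

    Sketch only: mirrors md.Conv2dReLU but swaps the fixed ReLU for an
    activation chosen by act_layer.
    """
    def __init__(self, in_channels, out_channels, kernel_size,
                 padding=0, stride=1, use_batchnorm=True, act_layer='relu'):
        activations = {
            'relu': nn.ReLU(inplace=True),
            'gelu': nn.GELU(),
            'silu': nn.SiLU(inplace=True),
        }
        conv = nn.Conv2d(
            in_channels, out_channels, kernel_size,
            stride=stride, padding=padding, bias=not use_batchnorm,
        )
        norm = nn.BatchNorm2d(out_channels) if use_batchnorm else nn.Identity()
        super().__init__(conv, norm, activations[act_layer])

With a module like this in place, the block in Example #2 can be built with, for instance, act_layer='gelu' while everything else stays identical to Example #1.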