    def __init__(self,
                 kernel_size=(3, 3),
                 num_filters=None,
                 strides=1,
                 auto_pad=True,
                 padding_mode='zero',
                 activation=None,
                 normalization=None,
                 use_spectral=False,
                 use_bias=False,
                 dilation=1,
                 groups=1,
                 add_noise=False,
                 noise_intensity=0.005,
                 dropout_rate=0,
                 name=None,
                 depth_multiplier=None,
                 sequence_rank='cna',
                 **kwargs):
        super(TransConv2d_Block, self).__init__(name=name)
        if sequence_rank in ['cna', 'nac']:
            self.sequence_rank = sequence_rank
        else:
            self.sequence_rank = 'cna'
        self.kernel_size = kernel_size
        self.num_filters = num_filters
        self.strides = strides
        self.auto_pad = auto_pad

        self.use_bias = use_bias
        self.dilation = dilation
        self.groups = groups

        self.add_noise = add_noise
        self.noise_intensity = noise_intensity
        self.dropout_rate = dropout_rate
        self.depth_multiplier = depth_multiplier
        self.use_spectral = use_spectral
        if not self.use_spectral:
            self.conv = TransConv2d(kernel_size=self.kernel_size,
                                    num_filters=self.num_filters,
                                    strides=self.strides,
                                    auto_pad=self.auto_pad,
                                    activation=None,
                                    use_bias=self.use_bias,
                                    dilation=self.dilation,
                                    groups=self.groups,
                                    name=self._name,
                                    depth_multiplier=self.depth_multiplier)
            self.norm = get_normalization(normalization)
        self.activation = get_activation(activation)
        self.dropout = None
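    # Usage sketch (illustrative only, not taken from library docs): assumes the
    # block is callable on a channels-last image tensor and that 'leaky_relu' /
    # 'batch' are aliases recognized by get_activation / get_normalization.
    #     up = TransConv2d_Block(kernel_size=(3, 3), num_filters=64, strides=2,
    #                            activation='leaky_relu', normalization='batch')
    #     y = up(x)  # transposed convolution upsamples x by the stride factor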
    def __init__(self,
                 kernel_size=(3, 3),
                 depth_multiplier=1,
                 strides=1,
                 auto_pad=True,
                 padding=None,
                 padding_mode='zero',
                 activation=None,
                 normalization=None,
                 use_spectral=False,
                 use_bias=False,
                 dilation=1,
                 groups=1,
                 add_noise=False,
                 noise_intensity=0.005,
                 dropout_rate=0,
                 name=None,
                 keep_output=False,
                 sequence_rank='cna',
                 **kwargs):
        super(DepthwiseConv2d_Block, self).__init__(name=name)
        if sequence_rank in ['cna', 'nac']:
            self.sequence_rank = sequence_rank
        else:
            self.sequence_rank = 'cna'
        self.kernel_size = kernel_size
        self.depth_multiplier = depth_multiplier

        self.strides = strides
        self.auto_pad = auto_pad
        self.padding = 0
        self.padding_mode = padding_mode

        self.use_bias = use_bias
        self.dilation = dilation

        self.add_noise = add_noise
        self.noise_intensity = noise_intensity
        self.dropout_rate = dropout_rate
        self.conv = None
        self.norm = get_normalization(normalization)
        self.use_spectral = use_spectral
        self.activation = get_activation(activation)
        self.dropout = None
        self.keep_output = keep_output
        self._name = name
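    # Usage sketch (illustrative; self.conv is left as None here, so the depthwise
    # kernel is presumably built later from the input shape):
    #     dw = DepthwiseConv2d_Block(kernel_size=(3, 3), depth_multiplier=1,
    #                                strides=1, activation='relu6')
    #     y = dw(x)  # per-channel convolution; channels scale with depth_multiplier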
    def __init__(self,
                 kernel_size=(3, 3, 3),
                 num_filters=32,
                 strides=1,
                 input_shape=None,
                 auto_pad=True,
                 activation='leaky_relu',
                 normalization=None,
                 use_bias=False,
                 dilation=1,
                 groups=1,
                 add_noise=False,
                 noise_intensity=0.001,
                 dropout_rate=0,
                 name=None,
                 sequence_rank='cna',
                 **kwargs):
        super(TransConv3d_Block, self).__init__(name=name)
        if sequence_rank in ['cna', 'nac']:
            self.sequence_rank = sequence_rank
        else:
            self.sequence_rank = 'cna'
        if add_noise:
            noise = tf.keras.layers.GaussianNoise(noise_intensity)
            self.add(noise)
        self._conv = TransConv3d(kernel_size=kernel_size,
                                 num_filters=num_filters,
                                 strides=strides,
                                 input_shape=input_shape,
                                 auto_pad=auto_pad,
                                 activation=None,
                                 use_bias=use_bias,
                                 dilation=dilation,
                                 groups=groups)
        self.add(self._conv)

        self.norm = get_normalization(normalization)
        if self.norm is not None:
            self.add(self.norm)

        self.activation = get_activation(snake2camel(activation))
        if self.activation is not None:
            self.add(self.activation)
        if dropout_rate > 0:
            self.drop = Dropout(dropout_rate)
            self.add(self.drop)
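    # Usage sketch (illustrative; the block is assembled Sequential-style above, so
    # a call runs optional noise -> transposed conv -> norm -> activation -> dropout):
    #     up3d = TransConv3d_Block(kernel_size=(3, 3, 3), num_filters=16, strides=2,
    #                              activation='leaky_relu', dropout_rate=0.2)
    #     y = up3d(x)  # x is a rank-5 volumetric tensor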
    def __init__(self,
                 kernel_size=3,
                 num_filters=None,
                 strides=1,
                 auto_pad=True,
                 padding_mode='zero',
                 activation=None,
                 normalization=None,
                 use_spectral=False,
                 use_bias=False,
                 dilation=1,
                 groups=1,
                 add_noise=False,
                 noise_intensity=0.005,
                 dropout_rate=0,
                 name=None,
                 depth_multiplier=None,
                 keep_output=False,
                 sequence_rank='cna',
                 **kwargs):
        super(Conv1d_Block, self).__init__(name=name, keep_output=keep_output)
        if sequence_rank in ['cna', 'nac']:
            self.sequence_rank = sequence_rank
        else:
            self.sequence_rank = 'cna'
        self.kernel_size = kernel_size
        self.num_filters = num_filters
        self.strides = strides
        self.auto_pad = auto_pad
        self.padding = 0
        self.padding_mode = padding_mode

        self.use_bias = use_bias
        self.dilation = dilation
        self.groups = groups
        self.depth_multiplier = depth_multiplier
        self.add_noise = add_noise
        self.noise_intensity = noise_intensity
        self.dropout_rate = dropout_rate

        norm = get_normalization(normalization)
        conv = Conv1d(kernel_size=self.kernel_size,
                      num_filters=self.num_filters,
                      strides=self.strides,
                      auto_pad=self.auto_pad,
                      padding_mode=self.padding_mode,
                      activation=None,
                      use_bias=self.use_bias,
                      dilation=self.dilation,
                      groups=self.groups,
                      name=self._name,
                      depth_multiplier=self.depth_multiplier)
        self.use_spectral = use_spectral
        if self.sequence_rank == 'cna':
            self.conv = conv
            self.norm = norm
            self.activation = get_activation(activation)
        elif self.sequence_rank == 'nac':
            self.norm = norm
            self.activation = get_activation(activation)
            self.conv = conv
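    # Usage sketch (illustrative; 'cna' registers conv -> norm -> activation, while
    # 'nac' registers norm -> activation -> conv, as set up above):
    #     c1 = Conv1d_Block(kernel_size=3, num_filters=128, strides=1,
    #                       activation='relu', sequence_rank='cna')
    #     y = c1(x)  # x is a rank-3 sequence tensor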
    def __init__(self,
                 kernel_size=(3, 3),
                 num_filters=None,
                 strides=1,
                 auto_pad=True,
                 padding_mode='zero',
                 activation=None,
                 normalization=None,
                 use_spectral=False,
                 use_bias=False,
                 dilation=1,
                 groups=1,
                 add_noise=False,
                 noise_intensity=0.005,
                 dropout_rate=0,
                 name=None,
                 depth_multiplier=None,
                 keep_output=False,
                 sequence_rank='cna',
                 **kwargs):
        super(Conv2d_Block, self).__init__(name=name)
        if sequence_rank in ['cna', 'nac']:
            self.sequence_rank = sequence_rank
        else:
            self.sequence_rank = 'cna'
        self.kernel_size = kernel_size
        self.num_filters = num_filters
        self.strides = strides
        self.keep_output = keep_output
        padding = kwargs.pop('padding', None)
        if isinstance(padding, str) and not auto_pad:
            auto_pad = (padding.lower() == 'same')
        elif isinstance(padding, int) and padding > 0:
            padding = _pair(padding)
            auto_pad = False
        elif isinstance(padding, tuple):
            auto_pad = False
        self.auto_pad = auto_pad
        self.padding = padding

        self.use_bias = use_bias
        self.dilation = dilation
        self.groups = groups

        self.add_noise = add_noise
        self.noise_intensity = noise_intensity
        self.dropout_rate = dropout_rate
        self.depth_multiplier = depth_multiplier
        self.use_spectral = use_spectral
        if not self.use_spectral:
            self.conv = Conv2d(kernel_size=self.kernel_size,
                               num_filters=self.num_filters,
                               strides=self.strides,
                               auto_pad=self.auto_pad,
                               activation=None,
                               use_bias=self.use_bias,
                               dilation=self.dilation,
                               groups=self.groups,
                               depth_multiplier=self.depth_multiplier,
                               padding=self.padding,
                               **kwargs)
            self.norm = get_normalization(normalization)
        self.activation = get_activation(activation)
        self.dropout = None
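    # Usage sketch (illustrative; an explicit `padding` keyword disables auto_pad as
    # handled above, and no conv/norm is built here when use_spectral=True):
    #     conv = Conv2d_Block(kernel_size=(3, 3), num_filters=64, strides=2,
    #                         padding=1, activation='leaky_relu')
    #     y = conv(x)  # padding=1 is expanded to (1, 1) and auto_pad is turned off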
    def __init__(self,
                 num_filters=None,
                 activation=None,
                 normalization=None,
                 use_spectral=False,
                 use_bias=False,
                 add_noise=False,
                 noise_intensity=0.005,
                 dropout_rate=0,
                 name=None,
                 depth_multiplier=None,
                 keep_output=False,
                 sequence_rank='cna',
                 **kwargs):
        super(FullConnect_Block, self).__init__(name=name,
                                                keep_output=keep_output)

        if sequence_rank in ['fna', 'naf', 'afn']:
            self.sequence_rank = sequence_rank
        else:
            self.sequence_rank = 'fna'

        self.num_filters = num_filters

        self.use_bias = use_bias

        self.add_noise = add_noise
        self.noise_intensity = noise_intensity
        self.dropout_rate = dropout_rate
        self.dropout = None
        self.depth_multiplier = depth_multiplier
        self.keep_output = keep_output

        norm = get_normalization(normalization)
        fc = Dense(num_filters=self.num_filters,
                   activation=None,
                   use_bias=self.use_bias,
                   depth_multiplier=self.depth_multiplier).to(self.device)
        self.use_spectral = use_spectral
        if isinstance(norm, SpectralNorm):
            self.use_spectral = True
            norm = None
            fc = SpectralNorm(module=fc)
        if self.sequence_rank == 'fna':
            self.add_module('fc', fc)
            self.add_module('norm', norm)
            self.add_module('activation',
                            get_activation(activation, only_layer=True))

        elif self.sequence_rank == 'naf':
            self.add_module('norm', norm)
            self.add_module('activation',
                            get_activation(activation, only_layer=True))
            self.add_module('fc', fc)

        elif self.sequence_rank == 'afn':
            self.add_module('activation',
                            get_activation(activation, only_layer=True))
            self.add_module('fc', fc)
            self.add_module('norm', norm)
        self._name = name
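    # Usage sketch (illustrative; if `normalization` resolves to SpectralNorm, the
    # Dense layer is wrapped instead of registering a separate norm module):
    #     fc = FullConnect_Block(num_filters=256, activation='relu',
    #                            sequence_rank='fna')
    #     y = fc(x)  # Dense -> norm -> activation in 'fna' order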