def __init__(self, *args, axis=1, activation='relu'):
        """

        Parameters
        ----------
        layer_defs : object
        """
        super(ConcateBlock, self).__init__()
        self.activation = get_activation(activation)
        self.axis = axis
        self.has_identity = False
        for i, arg in enumerate(args):
            if isinstance(arg, (Layer, list, dict)):
                if isinstance(arg, list):
                    # A bare list of layers is registered as a single Sequential branch.
                    self.add_module('branch{0}'.format(i + 1), Sequential(*arg))
                elif isinstance(arg, dict) and len(args) == 1:
                    for k, v in arg.items():
                        if isinstance(v, Identity):
                            self.has_identity = True
                            self.add_module('Identity', v)
                        else:
                            self.add_module(k, v)
                elif isinstance(arg, dict) and len(args) > 1:
                    raise ValueError(
                        'more than one dict argument is not supported.')
                elif isinstance(arg, Identity):
                    self.has_identity = True
                    self.add_module('Identity', arg)
                else:
                    self.add_module('branch{0}'.format(i + 1), arg)
        if len(self._modules) == 1 and not self.has_identity:
            self.add_module('Identity', Identity())
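    # Hedged usage sketch (illustrative only; assumes trident-style Conv2d
    # layers are importable, which is not shown in this snippet):
    #   block = ConcateBlock(Conv2d((1, 1), 32), Conv2d((3, 3), 32), axis=-1)
    #   y = block(x)  # each branch sees x; outputs are concatenated along
    #                 # `axis`, then the stored activation is applied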
    def __init__(self,
                 kernel_size=(3, 3),
                 num_filters=None,
                 strides=1,
                 auto_pad=True,
                 padding_mode='zero',
                 activation=None,
                 normalization=None,
                 use_spectral=False,
                 use_bias=False,
                 dilation=1,
                 groups=1,
                 add_noise=False,
                 noise_intensity=0.005,
                 dropout_rate=0,
                 name=None,
                 depth_multiplier=None,
                 sequence_rank='cna',
                 **kwargs):
        super(TransConv2d_Block, self).__init__(name=name)
        if sequence_rank in ['cna', 'nac']:
            self.sequence_rank = sequence_rank
        else:
            self.sequence_rank = 'cna'
        self.kernel_size = kernel_size
        self.num_filters = num_filters
        self.strides = strides
        self.auto_pad = auto_pad

        self.use_bias = use_bias
        self.dilation = dilation
        self.groups = groups

        self.add_noise = add_noise
        self.noise_intensity = noise_intensity
        self.dropout_rate = dropout_rate
        self.depth_multiplier = depth_multiplier
        self.use_spectral = use_spectral
        if not self.use_spectral:
            self.conv = TransConv2d(kernel_size=self.kernel_size,
                                    num_filters=self.num_filters,
                                    strides=self.strides,
                                    auto_pad=self.auto_pad,
                                    activation=None,
                                    use_bias=self.use_bias,
                                    dilation=self.dilation,
                                    groups=self.groups,
                                    name=self._name,
                                    depth_multiplier=self.depth_multiplier)
            self.norm = get_normalization(normalization)
        self.activation = get_activation(activation)
        self.dropout = None
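    # Hedged usage sketch (parameter values are illustrative only):
    #   up = TransConv2d_Block((3, 3), num_filters=64, strides=2,
    #                          activation='leaky_relu', normalization='batch')
    #   y = up(x)  # transposed conv -> optional norm -> activation; with
    #              # strides=2 this typically doubles the spatial resolution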
    def __init__(self,
                 kernel_size=(3, 3),
                 depth_multiplier=1,
                 strides=1,
                 auto_pad=True,
                 padding=None,
                 padding_mode='zero',
                 activation=None,
                 normalization=None,
                 use_spectral=False,
                 use_bias=False,
                 dilation=1,
                 groups=1,
                 add_noise=False,
                 noise_intensity=0.005,
                 dropout_rate=0,
                 name=None,
                 keep_output=False,
                 sequence_rank='cna',
                 **kwargs):
        super(DepthwiseConv2d_Block, self).__init__(name=name)
        if sequence_rank in ['cna', 'nac']:
            self.sequence_rank = sequence_rank
        else:
            self.sequence_rank = 'cna'
        self.kernel_size = kernel_size
        self.depth_multiplier = depth_multiplier

        self.strides = strides
        self.auto_pad = auto_pad
        self.padding = 0
        self.padding_mode = padding_mode

        self.use_bias = use_bias
        self.dilation = dilation

        self.add_noise = add_noise
        self.noise_intensity = noise_intensity
        self.dropout_rate = dropout_rate
        self.conv = None
        self.norm = get_normalization(normalization)
        self.use_spectral = use_spectral
        self.activation = get_activation(activation)
        self.dropout = None
        self.keep_output = keep_output
        self._name = name
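    # Hedged usage sketch (illustrative; note self.conv stays None here, so the
    # depthwise conv is presumably built lazily once the input shape is known):
    #   dw = DepthwiseConv2d_Block((3, 3), depth_multiplier=1, strides=1,
    #                              activation='relu6', normalization='batch')
    #   y = dw(x)  # depth_multiplier filters applied per input channel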
    def __init__(self,
                 se_filters,
                 num_filters,
                 is_gather_excite=False,
                 use_bias=False,
                 name=''):
        super(SqueezeExcite, self).__init__(name=name)

        self.se_filters = se_filters
        self.num_filters = num_filters
        self.squeeze = None
        self.excite = None
        self.is_gather_excite = is_gather_excite
        self.activation = get_activation('swish')
        self.pool = GlobalAvgPool2d(keepdim=True)
        self.use_bias = use_bias
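    # Hedged usage sketch (se_filters is the bottleneck width; values are
    # illustrative, and the forward pass is assumed to follow the standard
    # squeeze-and-excitation pattern):
    #   se = SqueezeExcite(se_filters=8, num_filters=64)
    #   y = se(x)  # global avg-pool -> squeeze -> swish -> excite -> rescale x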
    def __init__(self,
                 kernel_size=(3, 3, 3),
                 num_filters=32,
                 strides=1,
                 input_shape=None,
                 auto_pad=True,
                 activation='leaky_relu',
                 normalization=None,
                 use_bias=False,
                 dilation=1,
                 groups=1,
                 add_noise=False,
                 noise_intensity=0.001,
                 dropout_rate=0,
                 name=None,
                 sequence_rank='cna',
                 **kwargs):
        super(TransConv3d_Block, self).__init__(name=name)
        if sequence_rank in ['cna', 'nac']:
            self.sequence_rank = sequence_rank
        else:
            self.sequence_rank = 'cna'
        if add_noise:
            noise = tf.keras.layers.GaussianNoise(noise_intensity)
            self.add(noise)
        self._conv = TransConv3d(kernel_size=kernel_size,
                                 num_filters=num_filters,
                                 strides=strides,
                                 input_shape=input_shape,
                                 auto_pad=auto_pad,
                                 activation=None,
                                 use_bias=use_bias,
                                 dilation=dilation,
                                 groups=groups)
        self.add(self._conv)

        self.norm = get_normalization(normalization)
        if self.norm is not None:
            self.add(self.norm)

        self.activation = get_activation(snake2camel(activation))
        if self.activation is not None:
            self.add(self.activation)
        if dropout_rate > 0:
            self.drop = Dropout(dropout_rate)
            self.add(self.drop)
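    # Hedged usage sketch (illustrative shapes/values):
    #   up3d = TransConv3d_Block((3, 3, 3), num_filters=32, strides=2,
    #                            activation='leaky_relu', normalization='batch')
    #   y = up3d(volume)  # optional noise -> TransConv3d -> norm -> activation
    #                     # -> dropout, in the order the sub-layers were added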
    def __init__(self,
                 *args,
                 axis=-1,
                 branch_from=None,
                 activation=None,
                 mode='add',
                 name=None,
                 keep_output=False,
                 **kwargs):
        """
        Args
        layer_defs : object
        """
        super(ShortCut2d, self).__init__(name=name)
        self.activation = get_activation(activation)
        self.has_identity = False
        self.mode = mode
        self.axis = axis
        self.branch_from = branch_from
        self.branch_from_uuid = None
        self.keep_output = keep_output

        for i, arg in enumerate(args):
            if isinstance(arg, (Layer, tf.Tensor, list, dict)):
                if isinstance(arg, list):
                    # A bare list of layers is registered as a single Sequential branch.
                    self.add_module('branch{0}'.format(i + 1), Sequential(*arg))
                elif isinstance(arg, OrderedDict) and len(args) == 1:
                    for k, v in arg.items():
                        if isinstance(v, Identity):
                            self.has_identity = True
                            self.add_module('Identity', v)
                        else:
                            self.add_module(k, v)
                elif isinstance(arg, dict) and len(args) == 1:
                    keys = sorted(list(arg.keys()))
                    for k in keys:
                        v = arg[k]
                        if isinstance(v, Identity):
                            self.has_identity = True
                            self.add_module('Identity', v)
                        else:
                            self.add_module(str(k), v)
                elif isinstance(arg, (dict, OrderedDict)) and len(args) > 1:
                    raise ValueError(
                        'more than one dict argument is not supported.')
                elif isinstance(arg, Identity):
                    self.has_identity = True
                    self.add_module('Identity', arg)
                elif isinstance(arg, Layer):
                    if len(arg.name) > 0 and arg.name != arg._name:
                        self.add_module(arg.name, arg)
                    else:
                        self.add_module('branch{0}'.format(i + 1), arg)
                else:
                    raise ValueError('{0} is not supported.'.format(
                        arg.__class__.__name__))
        if (len(self._modules) == 1 and not self.has_identity
                and self.branch_from is None):
            self.has_identity = True
            self.add_module('Identity', Identity())
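    # Hedged usage sketch (illustrative; a residual-style add of two branches):
    #   res = ShortCut2d(Sequential(Conv2d((3, 3), 64), Conv2d((3, 3), 64)),
    #                    Identity(), mode='add', activation='relu')
    #   y = res(x)  # each branch sees x; outputs are merged according to `mode`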
    def __init__(self,
                 kernel_size=3,
                 num_filters=None,
                 strides=1,
                 auto_pad=True,
                 padding_mode='zero',
                 activation=None,
                 normalization=None,
                 use_spectral=False,
                 use_bias=False,
                 dilation=1,
                 groups=1,
                 add_noise=False,
                 noise_intensity=0.005,
                 dropout_rate=0,
                 name=None,
                 depth_multiplier=None,
                 keep_output=False,
                 sequence_rank='cna',
                 **kwargs):
        super(Conv1d_Block, self).__init__(name=name, keep_output=keep_output)
        if sequence_rank in ['cna', 'nac']:
            self.sequence_rank = sequence_rank
        else:
            self.sequence_rank = 'cna'
        self.kernel_size = kernel_size
        self.num_filters = num_filters
        self.strides = strides
        self.auto_pad = auto_pad
        self.padding = 0
        self.padding_mode = padding_mode

        self.use_bias = use_bias
        self.dilation = dilation
        self.groups = groups
        self.depth_multiplier = depth_multiplier
        self.add_noise = add_noise
        self.noise_intensity = noise_intensity
        self.dropout_rate = dropout_rate

        norm = get_normalization(normalization)
        conv = Conv1d(kernel_size=self.kernel_size,
                      num_filters=self.num_filters,
                      strides=self.strides,
                      auto_pad=self.auto_pad,
                      padding_mode=self.padding_mode,
                      activation=None,
                      use_bias=self.use_bias,
                      dilation=self.dilation,
                      groups=self.groups,
                      name=self._name,
                      depth_multiplier=self.depth_multiplier)
        self.use_spectral = use_spectral
        # if isinstance(norm, SpectralNorm):
        #     self.use_spectral = True
        #     norm = None
        #     conv= nn.utils.spectral_norm(conv)
        if self.sequence_rank == 'cna':
            self.conv = conv
            self.norm = norm
            self.activation = get_activation(activation)
        elif self.sequence_rank == 'nac':
            self.norm = norm
            self.activation = get_activation(activation)
            self.conv = conv
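    # Hedged usage sketch (illustrative values):
    #   c1 = Conv1d_Block(kernel_size=3, num_filters=128, strides=1,
    #                     activation='relu', normalization='batch',
    #                     sequence_rank='cna')
    #   y = c1(x)  # 'cna' registers conv -> norm -> activation;
    #              # 'nac' registers norm -> activation -> conv instead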
    def __init__(self,
                 kernel_size=(3, 3),
                 num_filters=None,
                 strides=1,
                 auto_pad=True,
                 padding_mode='zero',
                 activation=None,
                 normalization=None,
                 use_spectral=False,
                 use_bias=False,
                 dilation=1,
                 groups=1,
                 add_noise=False,
                 noise_intensity=0.005,
                 dropout_rate=0,
                 name=None,
                 depth_multiplier=None,
                 keep_output=False,
                 sequence_rank='cna',
                 **kwargs):
        super(Conv2d_Block, self).__init__(name=name)
        if sequence_rank in ['cna', 'nac']:
            self.sequence_rank = sequence_rank
        else:
            self.sequence_rank = 'cna'
        self.kernel_size = kernel_size
        self.num_filters = num_filters
        self.strides = strides
        self.keep_output = keep_output
        padding = kwargs.pop('padding', None)
        if isinstance(padding, str) and not auto_pad:
            auto_pad = (padding.lower() == 'same')
        elif isinstance(padding, int) and padding > 0:
            padding = _pair(padding)
            auto_pad = False
        elif isinstance(padding, tuple):
            auto_pad = False
        self.auto_pad = auto_pad
        self.padding = padding

        self.use_bias = use_bias
        self.dilation = dilation
        self.groups = groups

        self.add_noise = add_noise
        self.noise_intensity = noise_intensity
        self.dropout_rate = dropout_rate
        self.depth_multiplier = depth_multiplier
        self.use_spectral = use_spectral
        if not self.use_spectral:
            self.conv = Conv2d(kernel_size=self.kernel_size,
                               num_filters=self.num_filters,
                               strides=self.strides,
                               auto_pad=self.auto_pad,
                               activation=None,
                               use_bias=self.use_bias,
                               dilation=self.dilation,
                               groups=self.groups,
                               depth_multiplier=self.depth_multiplier,
                               padding=self.padding,
                               **kwargs)
            self.norm = get_normalization(normalization)
        self.activation = get_activation(activation)
        self.dropout = None
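    # Hedged usage sketch (illustrative values):
    #   c2 = Conv2d_Block((3, 3), num_filters=64, strides=2, auto_pad=True,
    #                     activation='leaky_relu', normalization='batch',
    #                     dropout_rate=0.2)
    #   y = c2(x)  # conv -> norm -> activation (-> dropout), per the fields above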
    def __init__(self,
                 num_filters=None,
                 activation=None,
                 normalization=None,
                 use_spectral=False,
                 use_bias=False,
                 add_noise=False,
                 noise_intensity=0.005,
                 dropout_rate=0,
                 name=None,
                 depth_multiplier=None,
                 keep_output=False,
                 sequence_rank='cna',
                 **kwargs):
        super(FullConnect_Block, self).__init__(name=name,
                                                keep_output=keep_output)

        if sequence_rank in ['fna', 'naf', 'afn']:
            self.sequence_rank = sequence_rank
        else:
            self.sequence_rank = 'fna'

        self.num_filters = num_filters

        self.use_bias = use_bias

        self.add_noise = add_noise
        self.noise_intensity = noise_intensity
        self.dropout_rate = dropout_rate
        self.dropout = None
        self.depth_multiplier = depth_multiplier
        self.keep_output = keep_output

        norm = get_normalization(normalization)
        fc = Dense(num_filters=self.num_filters,
                   activation=None,
                   use_bias=self.use_bias,
                   depth_multiplier=self.depth_multiplier).to(self.device)
        self.use_spectral = use_spectral
        if isinstance(norm, SpectralNorm):
            self.use_spectral = True
            norm = None
            fc = SpectralNorm(module=fc)
        if self.sequence_rank == 'fna':
            self.add_module('fc', fc)
            self.add_module('norm', norm)
            self.add_module('activation',
                            get_activation(activation, only_layer=True))

        elif self.sequence_rank == 'naf':
            self.add_module('norm', norm)
            self.add_module('activation',
                            get_activation(activation, only_layer=True))
            self.add_module('fc', fc)

        elif self.sequence_rank == 'afn':
            self.add_module('activation',
                            get_activation(activation, only_layer=True))
            self.add_module('fc', fc)
            self.add_module('norm', norm)
        self._name = name
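    # Hedged usage sketch (illustrative values):
    #   fc_block = FullConnect_Block(num_filters=256, activation='relu',
    #                                normalization='batch', sequence_rank='fna')
    #   y = fc_block(x)  # child modules run in registration order: 'fna' is
    #                    # fc -> norm -> activation; 'naf' and 'afn' reorder them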
    def __init__(self,
                 *args,
                 axis=-1,
                 branch_from=None,
                 activation=None,
                 mode='add',
                 name=None,
                 keep_output=False,
                 **kwargs):
        """

        Args:
            *args ():
            axis ():
            branch_from ():
            activation ():
            mode (str):  'add' 'dot' 'concate'
            name (str):
            keep_output (bool):
            **kwargs ():

        """
        super(ShortCut, self).__init__(name=name, keep_output=keep_output)
        valid_mode = ['add', 'subtract', 'concate', 'dot', 'maxout']
        if mode in valid_mode:
            self.mode = mode
        else:
            raise ValueError(
                '{0} is not valid mode. please use one of {1}'.format(
                    mode, valid_mode))
        self.activation = get_activation(activation)
        self.has_identity = False

        self.axis = axis
        self.branch_from = branch_from
        self.branch_from_uuid = None

        self.keep_output = keep_output

        for i, arg in enumerate(args):
            if isinstance(arg, (Layer, Tensor, list, dict)):
                if isinstance(arg, list):
                    # A bare list of layers is registered as a single Sequential branch.
                    self.add_module('branch{0}'.format(i + 1), Sequential(*arg))
                elif isinstance(arg, OrderedDict) and len(args) == 1:
                    for k, v in arg.items():
                        if isinstance(v, Identity):
                            self.has_identity = True
                            self.add_module('Identity', v)
                        else:
                            self.add_module(k, v)
                elif isinstance(arg, dict) and len(args) == 1:
                    keys = sorted(list(arg.keys()))
                    for k in keys:
                        v = arg[k]
                        if isinstance(v, Identity):
                            self.has_identity = True
                            self.add_module('Identity', v)
                        else:
                            self.add_module(str(k), v)
                elif isinstance(arg, (dict, OrderedDict)) and len(args) > 1:
                    raise ValueError(
                        'more than one dict argument is not supported.')

                elif isinstance(arg, Identity):
                    self.has_identity = True
                    self.add_module('Identity', arg)
                elif isinstance(arg, Layer):
                    if len(arg.name) > 0 and arg.name != arg.default_name:
                        self.add_module(arg.name, arg)
                    else:
                        self.add_module('branch{0}'.format(i + 1), arg)
                else:
                    raise ValueError('{0} is not supported.'.format(
                        arg.__class__.__name__))
        if (len(self._modules) == 1 and not self.has_identity
                and self.branch_from is None and mode != 'concate'):
            self.has_identity = True
            self.add_module('Identity', Identity())
        self.to(self.device)
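    # Hedged usage sketch (illustrative; merging a transformed branch with the
    # identity path):
    #   sc = ShortCut(Sequential(Conv2d((3, 3), 64), Conv2d((3, 3), 64)),
    #                 mode='add', activation='relu')
    #   y = sc(x)  # an implicit Identity branch is added automatically when
    #              # only one branch is given (see the tail of __init__ above)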