def __init__(self, se_filters, num_filters, is_gather_excite=False, activation='relu', use_bias=False, name=''):
    super(SqueezeExcite, self).__init__(name=name)
    self.se_filters = se_filters
    self.num_filters = num_filters
    self.use_bias = use_bias
    self.squeeze = Conv2d((1, 1), self.se_filters, strides=1, auto_pad=False, activation=None,
                          use_bias=self.use_bias, name=self.name + '_squeeze')
    self.excite = Conv2d((1, 1), self.num_filters, strides=1, auto_pad=False, activation=None,
                         use_bias=self.use_bias, name=self.name + '_excite')
    self.is_gather_excite = is_gather_excite
    self.activation = get_activation(activation)
    self.pool = GlobalAvgPool2d()
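# Example (hedged sketch): how a SqueezeExcite block might be dropped into a backbone stage.
# The shapes below assume the usual SE behaviour (the forward rescales the input channels by the
# excite gate); se_filters is typically a reduced channel count such as num_filters // 4.
#
#     se = SqueezeExcite(se_filters=16, num_filters=64, activation='relu', name='stage1_se')
#     # x: (N, 64, H, W) -> se(x): (N, 64, H, W), with channels re-weighted by the gate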
def __init__(self, *args, axis=1, activation='relu'):
    """Concatenate the outputs of several parallel branches along `axis`.

    Parameters
    ----------
    *args : Layer, list or dict
        branch definitions; a list is wrapped into a Sequential, a single dict
        maps branch names to layers.
    axis : int
        axis along which the branch outputs are concatenated.
    activation : str
        activation applied to the concatenated output.
    """
    super(ConcateBlock, self).__init__()
    self.activation = get_activation(activation)
    self.axis = axis
    self.has_identity = False
    for i in range(len(args)):
        arg = args[i]
        if isinstance(arg, (Layer, list, dict)):
            if isinstance(arg, list):
                # a list of layers becomes one sequential branch
                arg = Sequential(*arg)
                self.add_module('branch{0}'.format(i + 1), arg)
            elif isinstance(arg, dict) and len(args) == 1:
                for k, v in arg.items():
                    if isinstance(v, Identity):
                        self.has_identity = True
                        self.add_module('Identity', v)
                    else:
                        self.add_module(k, v)
            elif isinstance(arg, dict) and len(args) > 1:
                raise ValueError('more than one dict argument is not supported.')
            elif isinstance(arg, Identity):
                self.has_identity = True
                self.add_module('Identity', arg)
            else:
                self.add_module('branch{0}'.format(i + 1), arg)
    # make sure there is always an identity branch to concatenate against
    if len(self._modules) == 1 and self.has_identity == False:
        self.add_module('Identity', Identity())
    self.to(self.device)
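# Example (hedged sketch): an inception-style split where two branches see the same input and their
# outputs are concatenated on the channel axis. Conv2d_Block is assumed to be defined elsewhere in
# this module with the same (kernel_size, num_filters, ...) calling convention as the blocks above.
#
#     block = ConcateBlock(
#         Conv2d_Block((1, 1), num_filters=32, activation='relu'),
#         Conv2d_Block((3, 3), num_filters=32, activation='relu'),
#         axis=1)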
def __init__(self, kernel_size=(3, 3), depth_multiplier=1, strides=1, auto_pad=True, padding_mode='zero',
             activation=None, normalization=None, use_spectral=False, use_bias=False, dilation=1,
             add_noise=False, noise_intensity=0.005, dropout_rate=0, name=None, keep_output=False,
             sequence_rank='cna', **kwargs):
    super(DepthwiseConv2d_Block, self).__init__(name=name, keep_output=keep_output)
    if not hasattr(self, 'sequence_rank'):
        setattr(self, 'sequence_rank', 'cna')
    if sequence_rank in ['cna', 'nac']:
        self.sequence_rank = sequence_rank
    self.kernel_size = kernel_size
    self.depth_multiplier = depth_multiplier
    self.strides = strides
    self.auto_pad = auto_pad
    # padding is resolved dynamically by the convolution when auto_pad is True
    self.padding = 0
    self.padding_mode = padding_mode
    self.use_bias = use_bias
    self.dilation = dilation
    self.add_noise = add_noise
    self.noise_intensity = noise_intensity
    self.dropout_rate = dropout_rate
    self.conv = DepthwiseConv2d(kernel_size=self.kernel_size, depth_multiplier=self.depth_multiplier,
                                strides=self.strides, auto_pad=self.auto_pad, padding_mode=self.padding_mode,
                                activation=None, use_bias=self.use_bias, dilation=self.dilation,
                                name=self._name).to(self.device)
    self.norm = get_normalization(normalization)
    self.use_spectral = use_spectral
    self.activation = get_activation(activation)
    self.droupout = None
    self.keep_output = keep_output
    self._name = name
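# Example (hedged sketch): a depthwise-separable convolution built from this block followed by a
# pointwise 1x1 convolution. Conv2d_Block and Sequential are assumed to exist in this module, and
# normalization='batch' is assumed to map to batch normalization via get_normalization.
#
#     separable = Sequential(
#         DepthwiseConv2d_Block((3, 3), depth_multiplier=1, strides=1, normalization='batch', activation='relu'),
#         Conv2d_Block((1, 1), num_filters=128, normalization='batch', activation='relu'))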
def __init__(self, kernel_size=(3, 3), num_filters=None, strides=1, auto_pad=True, padding_mode='zero',
             activation=None, normalization=None, use_spectral=False, use_bias=False, dilation=1, groups=1,
             add_noise=False, noise_intensity=0.005, dropout_rate=0, name=None, depth_multiplier=None,
             keep_output=False, sequence_rank='cna', **kwargs):
    super(TransConv2d_Block, self).__init__(name=name, keep_output=keep_output)
    if not hasattr(self, 'sequence_rank'):
        setattr(self, 'sequence_rank', 'cna')
    if sequence_rank in ['cna', 'nac']:
        self.sequence_rank = sequence_rank
    self.kernel_size = kernel_size
    self.num_filters = num_filters
    self.strides = strides
    self.auto_pad = auto_pad
    self.padding = 0
    self.padding_mode = padding_mode
    self.use_bias = use_bias
    self.dilation = dilation
    self.groups = groups
    self.add_noise = add_noise
    self.noise_intensity = noise_intensity
    self.dropout_rate = dropout_rate
    self.use_spectral = use_spectral
    self.depth_multiplier = depth_multiplier
    self.conv = TransConv2d(kernel_size=self.kernel_size, num_filters=self.num_filters, strides=self.strides,
                            auto_pad=self.auto_pad, padding_mode=self.padding_mode, activation=None,
                            use_bias=self.use_bias, dilation=self.dilation, groups=self.groups,
                            name=self.name, depth_multiplier=self.depth_multiplier).to(self.device)
    self.norm = get_normalization(normalization)
    self.activation = get_activation(activation)
    self.droupout = None
    self.keep_output = keep_output
    self._name = name
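# Example (hedged sketch): using the transposed-convolution block as an upsampling step in a decoder.
# It is assumed that strides=2 with auto_pad=True roughly doubles the spatial size, as is typical for
# a transposed convolution; the exact output shape depends on TransConv2d's padding logic.
#
#     up = TransConv2d_Block((3, 3), num_filters=64, strides=2, normalization='batch', activation='relu')
#     # (N, C, H, W) -> approximately (N, 64, 2H, 2W)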
def __init__(self, *args, axis=1, branch_from=None, activation=None, mode='add', name=None, keep_output=False,
             **kwargs):
    """Shortcut (skip) connection that merges several branches.

    Args:
        *args: branch layers; each may be a Layer, a tensor, a list of Layers
            (wrapped into a Sequential), or a dict mapping names to Layers.
        axis (int): axis used when merging in 'concate' mode.
        branch_from (str): name of an earlier layer whose output feeds the shortcut.
        activation: activation applied after merging.
        mode (str): how branches are merged: 'add', 'dot' or 'concate'.
        name (str): name of this block.
        keep_output (bool): whether to keep the output tensor on the block.
        **kwargs ():
    """
    super(ShortCut2d, self).__init__(name=name)
    self.activation = get_activation(activation)
    self.has_identity = False
    self.mode = mode
    self.axis = axis
    self.branch_from = branch_from
    self.branch_from_uuid = None
    self.keep_output = keep_output
    for i in range(len(args)):
        arg = args[i]
        if isinstance(arg, (Layer, torch.Tensor, list, dict)):
            if isinstance(arg, list):
                # a list of layers becomes one sequential branch
                arg = Sequential(*arg)
                self.add_module('branch{0}'.format(i + 1), arg)
            elif isinstance(arg, OrderedDict) and len(args) == 1:
                for k, v in arg.items():
                    if isinstance(v, Identity):
                        self.has_identity = True
                        self.add_module('Identity', v)
                    else:
                        self.add_module(k, v)
            elif isinstance(arg, dict) and len(args) == 1:
                keys = sorted(list(arg.keys()))
                for k in keys:
                    v = arg[k]
                    if isinstance(v, Identity):
                        self.has_identity = True
                        self.add_module('Identity', v)
                    else:
                        self.add_module(str(k), v)
            elif isinstance(arg, (dict, OrderedDict)) and len(args) > 1:
                raise ValueError('more than one dict argument is not supported.')
            elif isinstance(arg, Identity):
                self.has_identity = True
                self.add_module('Identity', arg)
            elif isinstance(arg, nn.Module):
                if len(arg.name) > 0 and arg.name != arg.default_name:
                    self.add_module(arg.name, arg)
                else:
                    self.add_module('branch{0}'.format(i + 1), arg)
            else:
                raise ValueError('{0} is not supported.'.format(arg.__class__.__name__))
    # with only one explicit branch (and no branch_from), add an identity branch so there is something to merge with
    if len(self._modules) == 1 and self.has_identity == False and self.branch_from is None and mode != 'concate':
        self.has_identity = True
        self.add_module('Identity', Identity())
    self.to(self.device)
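# Example (hedged sketch): a basic residual unit. With a single branch and mode='add', the block adds
# an implicit Identity branch (see the constructor above), so the input is summed with the branch
# output. Conv2d_Block is assumed to be defined elsewhere in this module.
#
#     residual = ShortCut2d(
#         Sequential(
#             Conv2d_Block((3, 3), num_filters=64, normalization='batch', activation='relu'),
#             Conv2d_Block((3, 3), num_filters=64, normalization='batch', activation=None)),
#         mode='add', activation='relu')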
def __init__(self, kernel_size=(3, 3), num_filters=None, strides=1, auto_pad=True, padding_mode='zero',
             divisor_rank=0, activation=None, normalization=None, use_spectral=False, use_bias=False, dilation=1,
             groups=1, add_noise=False, noise_intensity=0.005, dropout_rate=0, name=None, depth_multiplier=None,
             keep_output=False, sequence_rank='cna', **kwargs):
    super(GcdConv2d_Block, self).__init__(name=name, keep_output=keep_output)
    if sequence_rank in ['cna', 'nac']:
        self.sequence_rank = sequence_rank
    else:
        self.sequence_rank = 'cna'
    self.kernel_size = kernel_size
    self.num_filters = num_filters
    self.strides = _pair(strides)
    self.auto_pad = auto_pad
    self.padding_mode = padding_mode
    self.use_spectral = use_spectral
    self.use_bias = use_bias
    self.dilation = dilation
    self.groups = groups
    self.add_noise = add_noise
    self.noise_intensity = noise_intensity
    self.dropout_rate = dropout_rate
    # divisor_rank must be assigned before the convolution is built, because it is passed to GcdConv2d below
    self.divisor_rank = divisor_rank
    if self.sequence_rank == 'cna':
        self.conv = GcdConv2d(self.kernel_size, input_filters=self.input_filters, num_filters=self.num_filters,
                              strides=self.strides, auto_pad=self.auto_pad, activation=None, init=None,
                              use_bias=self.use_bias, init_bias=0, divisor_rank=self.divisor_rank,
                              dilation=self.dilation).to(self.device)
        self.norm = get_normalization(normalization)
        self.activation = get_activation(activation)
    elif self.sequence_rank == 'nac':
        self.norm = get_normalization(normalization)
        self.activation = get_activation(activation)
        self.conv = GcdConv2d(self.kernel_size, input_filters=self.input_filters, num_filters=self.num_filters,
                              strides=self.strides, auto_pad=self.auto_pad, activation=None, init=None,
                              use_bias=self.use_bias, init_bias=0, divisor_rank=self.divisor_rank,
                              dilation=self.dilation).to(self.device)
def __init__(self, kernel_size=3, num_filters=None, strides=1, auto_pad=True, padding_mode='zero', activation=None,
             normalization=None, use_spectral=False, use_bias=False, dilation=1, groups=1, add_noise=False,
             noise_intensity=0.005, dropout_rate=0, name=None, depth_multiplier=None, keep_output=False,
             sequence_rank='cna', **kwargs):
    super(Conv1d_Block, self).__init__(name=name, keep_output=keep_output)
    if sequence_rank in ['cna', 'nac']:
        self.sequence_rank = sequence_rank
    else:
        self.sequence_rank = 'cna'
    self.kernel_size = kernel_size
    self.num_filters = num_filters
    self.strides = strides
    self.auto_pad = auto_pad
    # padding is resolved dynamically by the convolution when auto_pad is True
    self.padding = 0
    self.padding_mode = padding_mode
    self.use_bias = use_bias
    self.dilation = dilation
    self.groups = groups
    self.depth_multiplier = depth_multiplier
    self.add_noise = add_noise
    self.noise_intensity = noise_intensity
    self.dropout_rate = dropout_rate
    norm = get_normalization(normalization)
    conv = Conv1d(kernel_size=self.kernel_size, num_filters=self.num_filters, strides=self.strides,
                  auto_pad=self.auto_pad, padding_mode=self.padding_mode, activation=None,
                  use_bias=self.use_bias, dilation=self.dilation, groups=self.groups, name=self._name,
                  depth_multiplier=self.depth_multiplier).to(self.device)
    self.use_spectral = use_spectral
    # passing SpectralNorm as the normalization wraps the convolution with spectral normalization instead
    if isinstance(norm, SpectralNorm):
        self.use_spectral = True
        norm = None
        conv = nn.utils.spectral_norm(conv)
    # register submodules in the order given by sequence_rank ('cna': conv-norm-activation, 'nac': norm-activation-conv)
    if self.sequence_rank == 'cna':
        self.conv = conv
        self.norm = norm
        self.activation = get_activation(activation)
    elif self.sequence_rank == 'nac':
        self.norm = norm
        self.activation = get_activation(activation)
        self.conv = conv
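# Example (hedged sketch): a small dilated temporal stack built from Conv1d_Block, in the spirit of a
# TCN. Sequential is assumed to exist in this module; the dilation grows per layer so the receptive
# field widens without increasing the kernel size.
#
#     tcn = Sequential(
#         Conv1d_Block(3, num_filters=64, dilation=1, normalization='batch', activation='relu'),
#         Conv1d_Block(3, num_filters=64, dilation=2, normalization='batch', activation='relu'),
#         Conv1d_Block(3, num_filters=64, dilation=4, normalization='batch', activation='relu'))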