def __init__(self, upsampling=(2, 2), output_size=None, data_format=None, **kwargs):
    """Bilinear upsampling layer setup.

    :param upsampling: tuple of 2 ints: row/col upsampling factors
        (ignored when ``output_size`` is given)
    :param output_size: tuple of 2 ints: explicit target spatial size
    :param data_format: Keras data format string or None for the default
    """
    super(BilinearUpsampling, self).__init__(**kwargs)
    # normalize_data_format moved from keras.utils.conv_utils to
    # keras.backend around Keras 2.2. Probing the import is robust where
    # the old lexicographic check (keras.__version__ > "2.2.0") is not —
    # e.g. "2.10.0" < "2.2.0" when compared as strings.
    try:
        from keras.backend import normalize_data_format
    except ImportError:
        from keras.utils.conv_utils import normalize_data_format
    self.data_format = normalize_data_format(data_format)
    self.input_spec = KE.InputSpec(ndim=4)
    if output_size:
        # An explicit output size takes precedence over upsampling factors.
        self.output_size = conv_utils.normalize_tuple(
            output_size, 2, 'output_size')
        self.upsampling = None
    else:
        self.output_size = None
        self.upsampling = conv_utils.normalize_tuple(
            upsampling, 2, 'upsampling')
def __init__(self, ch_j, n_j, kernel_size=(3, 3), strides=(1, 1), r_num=1,
             b_alphas=None, padding='same', data_format='channels_last',
             dilation_rate=(1, 1), kernel_initializer='glorot_uniform',
             bias_initializer='zeros', kernel_regularizer=None,
             activity_regularizer=None, kernel_constraint=None, **kwargs):
    """Convolutional capsule layer configuration.

    :param ch_j: number of capsule channels in layer J
    :param n_j: number of neurons per capsule in layer J
    :param r_num: number of routing iterations
    :param b_alphas: per-iteration routing alphas; defaults to [8, 8, 8]
    """
    super(Conv2DCaps, self).__init__(**kwargs)
    rank = 2
    self.ch_j = ch_j  # Number of capsules in layer J
    self.n_j = n_j  # Number of neurons in a capsule in J
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
    self.r_num = r_num
    # None-sentinel instead of a shared mutable default argument;
    # [8, 8, 8] is the historical default value.
    self.b_alphas = [8, 8, 8] if b_alphas is None else b_alphas
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = K.normalize_data_format(data_format)
    # NOTE(review): the dilation_rate argument is accepted but ignored —
    # the layer always uses (1, 1). Confirm whether this is intentional
    # before honoring the argument.
    self.dilation_rate = (1, 1)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.input_spec = InputSpec(ndim=rank + 3)
def __init__(self, padding=(1, 1), data_format=None, **kwargs):
    """Reflection padding for the two spatial axes of a 4D tensor.

    `padding` may be a single int (symmetric on both axes), a pair of
    ints, or a pair of (before, after) pairs.
    """
    super(ReflectionPadding2D, self).__init__(**kwargs)
    self.data_format = normalize_data_format(data_format)
    if isinstance(padding, int):
        # One int -> same amount on every side.
        self.padding = ((padding, padding), (padding, padding))
    elif hasattr(padding, '__len__'):
        if len(padding) != 2:
            raise ValueError('`padding` should have two elements. '
                             'Found: ' + str(padding))
        pad_rows = conv_utils.normalize_tuple(padding[0], 2,
                                              '1st entry of padding')
        pad_cols = conv_utils.normalize_tuple(padding[1], 2,
                                              '2nd entry of padding')
        self.padding = (pad_rows, pad_cols)
    else:
        raise ValueError('`padding` should be either an int, '
                         'a tuple of 2 ints '
                         '(symmetric_height_pad, symmetric_width_pad), '
                         'or a tuple of 2 tuples of 2 ints '
                         '((top_pad, bottom_pad), (left_pad, right_pad)). '
                         'Found: ' + str(padding))
    self.input_spec = InputSpec(ndim=4)
def __init__(self, rank, filters, kernel_size, output_format='signal', strides=1, padding='valid', data_format=None, dilation_rate=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):
    """Configure the MFCC layer's convolution hyper-parameters."""
    super(MFCC, self).__init__(**kwargs)
    self.rank = rank
    self.filters = filters
    # Scalars are broadcast to `rank`-length tuples.
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = K.normalize_data_format(data_format)
    # NOTE(review): kernel_initializer is stored raw (no initializers.get),
    # and several accepted arguments (dilation_rate, activation, use_bias,
    # the regularizers/constraints) are never stored — confirm intent.
    self.kernel_initializer = kernel_initializer
    self.output_format = output_format
    self.input_spec = InputSpec(ndim=self.rank + 2)
def __init__(self, data_format=None, **kwargs):
    """Initialize the layer and normalize `data_format` for the active backend."""
    super().__init__(**kwargs)
    # The "amd" backend exposes normalize_data_format on K itself;
    # other backends keep it in conv_utils.
    if get_backend() == "amd":
        self.data_format = K.normalize_data_format(data_format)  # pylint:disable=no-member
    else:
        self.data_format = conv_utils.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=4)
def set_config(self, config_in):
    """Re-hydrate convolution hyper-parameters from a serialized config dict."""
    cfg = config_in
    rank = 2
    self.rank = rank
    self.filters = cfg['filters']
    # Normalize scalar-or-tuple entries to rank-length tuples.
    self.kernel_size = conv_utils.normalize_tuple(cfg['kernel_size'], rank, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(cfg['strides'], rank, 'strides')
    self.padding = conv_utils.normalize_padding(cfg['padding'])
    self.data_format = K.normalize_data_format(cfg['data_format'])
    self.dilation_rate = conv_utils.normalize_tuple(cfg['dilation_rate'], rank, 'dilation_rate')
    self.activation = activations.get(cfg['activation'])
    self.use_bias = cfg['use_bias']
    # Resolve string identifiers to Keras objects.
    self.kernel_initializer = initializers.get(cfg['kernel_initializer'])
    self.bias_initializer = initializers.get(cfg['bias_initializer'])
    self.kernel_regularizer = regularizers.get(cfg['kernel_regularizer'])
    self.bias_regularizer = regularizers.get(cfg['bias_regularizer'])
    self.activity_regularizer = regularizers.get(cfg['activity_regularizer'])
    self.kernel_constraint = constraints.get(cfg['kernel_constraint'])
    self.bias_constraint = constraints.get(cfg['bias_constraint'])
    self.input_spec = InputSpec(ndim=rank + 2)
def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), return_sequences=False, go_backwards=False, stateful=False, **kwargs):
    """Base class setup for 2D convolutional-recurrent layers.

    :param filters: number of output filters
    :param kernel_size: int or tuple of 2 ints
    :param return_sequences: whether to return the full output sequence
    :param stateful: whether batch state carries over between calls
    """
    super(ConvRecurrent2D, self).__init__(**kwargs)
    self.filters = filters
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    # normalize_data_format moved from conv_utils to keras.backend in newer
    # Keras. Probe the import rather than comparing keras.__version__
    # lexicographically (which mis-orders e.g. "2.10.0" < "2.1.3").
    try:
        from keras.backend import normalize_data_format
        self.data_format = normalize_data_format(data_format)
    except ImportError:
        self.data_format = conv_utils.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(
        dilation_rate, 2, 'dilation_rate')
    self.return_sequences = return_sequences
    self.go_backwards = go_backwards
    self.stateful = stateful
    # 5D input: (batch, time, rows, cols, channels) or channels-first variant.
    self.input_spec = [InputSpec(ndim=5)]
    self.state_spec = None
def __init__(self, loss_function, lats, data_format='channels_last', weighting='cosine'):
    """
    Initialize a weighted loss.

    :param loss_function: method: Keras loss function to apply after the weighting
    :param lats: ndarray: 1-dimensional array of latitude coordinates
    :param data_format: Keras data_format ('channels_first' or 'channels_last')
    :param weighting: str: type of weighting to apply. Options are:
        cosine: weight by the cosine of the latitude (default)
        midlatitude: weight by the cosine of the latitude but also apply a 25%
            reduction to the equator and boost to the mid-latitudes
    :raises ValueError: if `weighting` is not a recognized option
    """
    self.loss_function = loss_function
    self.lats = lats
    self.data_format = K.normalize_data_format(data_format)
    if weighting not in ['cosine', 'midlatitude']:
        raise ValueError(
            "'weighting' must be one of 'cosine' or 'midlatitude'")
    self.weighting = weighting
    lat_tensor = K.zeros(lats.shape)
    # (A leftover debug `print(lats)` was removed here.)
    lat_tensor.assign(K.cast_to_floatx(lats[:]))
    self.weights = K.cos(lat_tensor * np.pi / 180.)
    if self.weighting == 'midlatitude':
        # Subtracting the sine term dampens the equator and boosts
        # the mid-latitudes.
        self.weights = self.weights - 0.25 * K.sin(
            lat_tensor * 2 * np.pi / 180.)
    self.is_init = False
    self.__name__ = 'latitude_weighted_loss'
def __init__(self, size, data_format=None, **kwargs):
    """Shared upsampling base; `size` holds one factor per spatial dim."""
    self.size = size
    # self.rank is 1 for UpSampling1D, 2 for UpSampling2D.
    self.rank = len(size)
    self.data_format = K.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=self.rank + 2)
    super(_UpSampling, self).__init__(**kwargs)
def __init__(self, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, shape=(0, ((3, 3, 64, 64), 64)), **kwargs):
    """Inference-only conv layer: resolves identifiers and stores a fixed shape."""
    # Always use the backend's default data format (argument is not exposed).
    self.data_format = K.normalize_data_format(None)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    # Resolve string identifiers to Keras objects.
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    super(Conv2DInfer, self).__init__(**kwargs)
    self.shape = shape
    # Keep the raw activation identifier alongside the resolved callable.
    self.activationname = activation
def __init__(self, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):
    """2D convolution variant; normalizes and stores all hyper-parameters."""
    super(MirrorConv2D, self).__init__(**kwargs)
    rank = 2
    self.rank = rank
    self.filters = filters
    # Scalars are broadcast to 2-tuples.
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = K.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    # Resolve string identifiers to Keras objects.
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = InputSpec(ndim=rank + 2)
def __init__(self, padding, data_format='channels_last', **kwargs):
    """Shared zero-padding base; `padding` holds one spec per spatial dim."""
    self.padding = padding
    # self.rank is 1 for ZeroPadding1D, 2 for ZeroPadding2D.
    self.rank = len(padding)
    self.data_format = K.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=self.rank + 2)
    super(_ZeroPadding, self).__init__(**kwargs)
def __init__(self, data_format=None, **kwargs):
    """Global 2D pooling base: normalize data_format for the active backend."""
    super(_GlobalPooling2D, self).__init__(**kwargs)
    # The "amd" backend exposes normalize_data_format on K itself;
    # other backends keep it in conv_utils.
    if get_backend() == "amd":
        self.data_format = K.normalize_data_format(data_format)
    else:
        self.data_format = conv_utils.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=4)
def __init__(self, size=(2, 2), data_format=None, **kwargs):
    """Store normalized upsampling factors and data format."""
    super().__init__(**kwargs)
    # The "amd" backend exposes normalize_data_format on K itself;
    # other backends keep it in conv_utils.
    if get_backend() == "amd":
        self.data_format = K.normalize_data_format(data_format)
    else:
        self.data_format = conv_utils.normalize_data_format(data_format)
    self.size = conv_utils.normalize_tuple(size, 2, 'size')
def __init__(self, output_dim=(1, 1), data_format=None, **kwargs):
    """Resize layer: records the target (rows, cols) and data format."""
    super(ResizeImages, self).__init__(**kwargs)
    self.output_dim = conv_utils.normalize_tuple(output_dim, 2, 'output_dim')
    self.data_format = K.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=4)
def __init__(self, scale_factor=2, data_format=None, **kwargs):
    """Sub-pixel (pixel-shuffle) upscaling: record factor and data format."""
    super(SubPixelUpscaling, self).__init__(**kwargs)
    self.data_format = normalize_data_format(data_format)
    self.scale_factor = scale_factor
def __init__(self, rank, filters, kernel_size, strides=1, padding='valid',
             data_format=None, dilation_rate=1, activation=None, use_bias=True,
             kernel_initializer='quaternion', bias_initializer='zeros',
             gamma_diag_initializer=sqrt_init, gamma_off_initializer='zeros',
             kernel_regularizer=None, bias_regularizer=None,
             gamma_diag_regularizer=None, gamma_off_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, gamma_diag_constraint=None,
             gamma_off_constraint=None, init_criterion='he', seed=None,
             epsilon=1e-7, **kwargs):
    """Base quaternion convolution layer.

    :param rank: spatial rank of the convolution
    :param filters: number of quaternion output filters
    :param kernel_initializer: 'quaternion', 'quaternion_independent', or any
        standard Keras initializer identifier
    :param init_criterion: variance-scaling criterion for the quaternion init
    :param seed: RNG seed; a random one is drawn when None
    :param epsilon: numerical-stability constant
    """
    super(QuaternionConv, self).__init__(**kwargs)
    self.rank = rank
    self.filters = filters
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = K.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.init_criterion = init_criterion
    self.epsilon = epsilon
    # The two quaternion-specific initializers stay as plain strings;
    # everything else resolves through initializers.get().
    if kernel_initializer in ['quaternion', 'quaternion_independent']:
        self.kernel_initializer = kernel_initializer
    else:
        self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.gamma_diag_initializer = initializers.get(gamma_diag_initializer)
    self.gamma_off_initializer = initializers.get(gamma_off_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.gamma_diag_regularizer = regularizers.get(gamma_diag_regularizer)
    self.gamma_off_regularizer = regularizers.get(gamma_off_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.gamma_diag_constraint = constraints.get(gamma_diag_constraint)
    self.gamma_off_constraint = constraints.get(gamma_off_constraint)
    if seed is None:
        # Use an int upper bound: np.random.randint with a float high
        # (the old 10e6) is deprecated/rejected in modern NumPy.
        # 10 ** 7 == 10e6, so the drawn range is unchanged.
        self.seed = np.random.randint(1, 10 ** 7)
    else:
        self.seed = seed
    self.input_spec = InputSpec(ndim=self.rank + 2)
def __init__(self, scale_factor=2, data_format=None, **kwargs):
    """Record the upscaling factor and a backend-appropriate data format."""
    super().__init__(**kwargs)
    self.scale_factor = scale_factor
    # The "amd" backend exposes normalize_data_format on K itself;
    # other backends keep it in conv_utils.
    if get_backend() == "amd":
        self.data_format = K.normalize_data_format(data_format)  # pylint:disable=no-member
    else:
        self.data_format = conv_utils.normalize_data_format(data_format)
def __init__(self, target_layer, data_format=None, **kwargs):
    """Interpolation layer that resizes its input to match `target_layer`."""
    super(Interpolate, self).__init__(**kwargs)
    self.target_layer = target_layer
    # Cache the target's input shape up front.
    self.target_shape = _collect_input_shape(target_layer)
    self.data_format = K.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=4)
def __init__(self, scale_factor=2, data_format=None, **kwargs):
    """Sub-pixel upscaling: record factor and backend-appropriate data format."""
    super(SubPixelUpscaling, self).__init__(**kwargs)
    self.scale_factor = scale_factor
    # The "amd" backend exposes normalize_data_format on K itself;
    # other backends keep it in conv_utils.
    if get_backend() == "amd":
        self.data_format = K.normalize_data_format(data_format)
    else:
        self.data_format = conv_utils.normalize_data_format(data_format)
def __init__(self, strides=2, data_format=None, **kwargs):
    """Reorg (space-to-depth) layer setup.

    :param strides: block size used to fold spatial positions into channels
    :param data_format: Keras data format string or None for the default
    """
    super(Reorg2D, self).__init__(**kwargs)
    self.strides = strides
    # Probe for the function instead of the original exact-equality check
    # (keras.__version__ == '2.2.2'), which silently took the wrong branch
    # on every other Keras release that also ships K.normalize_data_format.
    if hasattr(K, 'normalize_data_format'):
        self.data_format = K.normalize_data_format(data_format)
    else:
        self.data_format = conv_utils.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=4)
def __init__(self, data_format=None, **kwargs):
    """Maxima layer: pick the right normalize_data_format for the Keras version."""
    super(Maxima2D, self).__init__(**kwargs)
    # normalize_data_format lives on K after Keras 2.2.0; parse_version
    # gives a correct (non-lexicographic) comparison.
    is_newer = parse_version(keras.__version__) > parse_version("2.2.0")
    if is_newer:
        self.data_format = K.normalize_data_format(data_format)
    else:
        self.data_format = conv_utils.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=4)
def __init__(self, size=(2, 2), data_format=None, **kwargs):
    """Bicubic upsampling: record per-axis factors and data format."""
    super(BicubicUpSampling2D, self).__init__(**kwargs)
    self.size = conv_utils.normalize_tuple(size, 2, 'size')
    self.data_format = K.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=4)
def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation='tanh', recurrent_activation='sigmoid', beta=1., use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='glorot_uniform', bias_initializer='zeros', use_chrono_initialization=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0., recurrent_dropout=0., **kargs):
    """Convolutional JANet cell: normalize and store all hyper-parameters."""
    super(ConvJANetCell, self).__init__(**kargs)
    self.filters = filters
    # Scalars become 2-tuples.
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = K.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2, 'dilation_rate')
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.beta = beta
    self.use_bias = use_bias
    self.use_chrono_initialization = use_chrono_initialization
    # Resolve string identifiers to Keras objects.
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    # Clamp dropout rates into [0, 1].
    self.dropout = min(1., max(0., dropout))
    self.recurrent_dropout = min(1., max(0., recurrent_dropout))
    self.state_size = (self.filters, self.filters)
    self._dropout_mask = None
    self._recurrent_dropout_mask = None
def __init__(self, size=(2, 2), num_pixels=(0, 0), data_format='channels_last', method_name='FgSegNet_M', **kwargs):
    """Upsampling layer with optional extra-pixel cropping per FgSegNet variant.

    :param size: tuple of 2 ints: upsampling factors
    :param num_pixels: extra pixels to account for per axis
    :param method_name: one of 'FgSegNet_M', 'FgSegNet_S', 'FgSegNet_v2'
    :raises ValueError: if method_name is not a supported variant
    """
    super(MyUpSampling2D, self).__init__(**kwargs)
    self.data_format = K.normalize_data_format(data_format)
    self.size = conv_utils.normalize_tuple(size, 2, 'size')
    self.input_spec = InputSpec(ndim=4)
    self.num_pixels = num_pixels
    self.method_name = method_name
    # Validate with an exception instead of `assert`, which is silently
    # stripped when Python runs with -O.
    if method_name not in ('FgSegNet_M', 'FgSegNet_S', 'FgSegNet_v2'):
        raise ValueError('Provided method_name is incorrect.')
def __init__(self, size=(2, 2), data_format=None, interpolation='nearest', **kwargs):
    """Upsample rows/cols of a 4D tensor by integer factors."""
    super(UpSampling2D, self).__init__(**kwargs)
    self.data_format = normalize_data_format(data_format)
    self.size = conv_utils.normalize_tuple(size, 2, 'size')
    self.interpolation = interpolation
    self.input_spec = InputSpec(ndim=4)
def __init__(self, k=1, sorted=True, data_format='channels_last', **kwargs):
    """K-max pooling over a 3D input: keep the k largest activations.

    Note: the `sorted` parameter name shadows the builtin; it is kept
    because it is part of the public interface.
    """
    super(KMaxPooling, self).__init__(**kwargs)
    self.k = k
    self.sorted = sorted
    self.data_format = K.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=3)
def __init__(self, upsampling=(2, 2), output_size=None, data_format=None, **kwargs):
    """Bilinear upsampling: either fixed factors or an explicit output size."""
    super(BilinearUpsampling, self).__init__(**kwargs)
    self.data_format = K.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=4)
    if output_size:
        # Explicit target size wins; factors are unused.
        self.output_size = conv_utils.normalize_tuple(output_size, 2, 'output_size')
        self.upsampling = None
    else:
        self.output_size = None
        self.upsampling = conv_utils.normalize_tuple(upsampling, 2, 'upsampling')
def __init__(self, upsample_factor=1, rotation_resolution=1, rotation_guess=0, data_format=None, **kwargs):
    """Rotation-registration layer: store search parameters and data format."""
    super(RegisterRotation2D, self).__init__(**kwargs)
    self.upsample_factor = upsample_factor
    self.rotation_resolution = rotation_resolution
    self.rotation_guess = rotation_guess
    self.data_format = normalize_data_format(data_format)
def __init__(self, index=None, coordinate_scale=1., confidence_scale=255., data_format=None, **kwargs):
    """Maxima layer over 4D input: store scales and an optional channel index."""
    super(Maxima2D, self).__init__(**kwargs)
    self.index = index
    self.coordinate_scale = coordinate_scale
    self.confidence_scale = confidence_scale
    self.data_format = normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=4)
def __init__(self, upsampling=(2, 2), output_size=None, data_format=None, **kwargs):
    """Bilinear upsampling: either fixed factors or an explicit target size."""
    super(BilinearUpsampling, self).__init__(**kwargs)
    self.data_format = K.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=4)
    if output_size:
        # An explicit output size overrides the upsampling factors.
        self.output_size = conv_utils.normalize_tuple(output_size, 2,
                                                      'output_size')
        self.upsampling = None
    else:
        self.output_size = None
        self.upsampling = conv_utils.normalize_tuple(upsampling, 2,
                                                     'upsampling')
def __init__(self, axis=-1, gamma_init='one', beta_init='zero', gamma_regularizer=None, beta_regularizer=None, epsilon=1e-6, group=32, data_format=None, **kwargs):
    """Group normalization: normalize activations within channel groups."""
    # Weights are created later in build(); start empty.
    self.beta = None
    self.gamma = None
    super(GroupNormalization, self).__init__(**kwargs)
    self.axis = to_list(axis)
    self.epsilon = epsilon
    self.group = group
    # Resolve string identifiers to Keras objects.
    self.gamma_init = initializers.get(gamma_init)
    self.beta_init = initializers.get(beta_init)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.data_format = K.normalize_data_format(data_format)
    self.supports_masking = True
def test_invalid_data_format():
    """Ensure normalize_data_format rejects an unrecognized format string."""
    with pytest.raises(ValueError):
        K.normalize_data_format('channels_middle')
def __init__(self, scale_factor=2, data_format=None, **kwargs):
    """Sub-pixel upscaling: store the factor and normalized data format."""
    super(SubPixelUpscaling, self).__init__(**kwargs)
    self.data_format = K.normalize_data_format(data_format)
    self.scale_factor = scale_factor
def __init__(self, size=(2, 2), data_format=None, **kwargs):
    """Pixel-shuffle layer: store per-axis block sizes and data format."""
    super(PixelShuffler, self).__init__(**kwargs)
    self.size = conv_utils.normalize_tuple(size, 2, 'size')
    self.data_format = K.normalize_data_format(data_format)