def __init__(self,
             axis: Union[List[int], Tuple[int], int] = -1,
             momentum: float = 0.99,
             center: bool = True,
             scale: bool = True,
             epsilon: float = 0.001,
             beta_initializer=Zeros(),
             gamma_initializer=Ones(),
             dtype=DEFAULT_COMPLEX_TYPE,
             moving_mean_initializer=Zeros(),
             moving_variance_initializer=Ones(),
             cov_method: int = 2,  # TODO: Check inits
             **kwargs):
    self.my_dtype = tf.dtypes.as_dtype(dtype)
    self.epsilon = epsilon
    self.cov_method = cov_method
    if isinstance(axis, int):
        axis = [axis]
    self.axis = list(axis)
    super(ComplexBatchNormalization, self).__init__(**kwargs)
    self.momentum = momentum
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.moving_mean_initializer = initializers.get(moving_mean_initializer)
    self.moving_variance_initializer = initializers.get(moving_variance_initializer)
    self.center = center
    self.scale = scale
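# Usage sketch (hedged): a minimal example of how this layer is expected to be
# instantiated. It assumes `ComplexBatchNormalization` is importable from
# `cvnn.layers` and that inputs are complex64; verify the import path against
# your installed version of the library.
#
#   import tensorflow as tf
#   from cvnn.layers import ComplexBatchNormalization
#
#   bn = ComplexBatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)
#   x = tf.complex(tf.random.normal((8, 4)), tf.random.normal((8, 4)))
#   y = bn(x, training=True)  # normalizes real/imag parts jointly via a 2x2 covariance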
def __init__(self,
             units: int,
             activation: t_activation = None,
             use_bias: bool = True,
             kernel_initializer=ComplexGlorotUniform(),
             bias_initializer=Zeros(),
             dtype=DEFAULT_COMPLEX_TYPE,  # TODO: Check typing of this.
             **kwargs):
    """
    :param units: Positive integer, dimensionality of the output space.
    :param activation: Activation function to use.
        Either from keras.activations or cvnn.activations. For complex dtype,
        only the cvnn.activations module is supported.
        If you don't specify anything, no activation is applied
        (i.e. "linear" activation: a(x) = x).
    :param use_bias: Boolean, whether the layer uses a bias vector.
    :param kernel_initializer: Initializer for the kernel weights matrix.
        Recommended to use a `ComplexInitializer` such as
        `cvnn.initializers.ComplexGlorotUniform()` (default).
    :param bias_initializer: Initializer for the bias vector.
        Recommended to use a `ComplexInitializer` such as
        `cvnn.initializers.Zeros()` (default).
    :param dtype: Dtype of the input and layer.
    """
    # TODO: verify the initializers? and that dtype complex has cvnn.activations.
    super(ComplexDense, self).__init__(units, activation=activation,
                                       use_bias=use_bias,
                                       kernel_initializer=kernel_initializer,
                                       bias_initializer=bias_initializer,
                                       **kwargs)
    # !Cannot override dtype of the layer because it has a read-only @property
    self.my_dtype = tf.dtypes.as_dtype(dtype)
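# Usage sketch (hedged): ComplexDense with a complex activation. This assumes
# the string 'cart_relu' resolves through cvnn's activation lookup, as in
# published versions of the library; verify against your installed version.
#
#   import tensorflow as tf
#   from cvnn.layers import ComplexDense
#
#   layer = ComplexDense(units=32, activation='cart_relu')
#   x = tf.complex(tf.random.normal((8, 16)), tf.random.normal((8, 16)))
#   y = layer(x)  # expected shape (8, 32), dtype complex64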
def __init__(self, filters, kernel_size, dtype=DEFAULT_COMPLEX_TYPE,
             strides=(1, 1, 1), padding='valid', data_format=None,
             dilation_rate=(1, 1, 1), groups=1, activation=None, use_bias=True,
             kernel_initializer=ComplexGlorotUniform(), bias_initializer=Zeros(),
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, **kwargs):
    super(ComplexConv3D, self).__init__(
        rank=3, dtype=dtype, filters=filters, kernel_size=kernel_size,
        strides=strides, padding=padding, data_format=data_format,
        dilation_rate=dilation_rate, groups=groups,
        activation=activations.get(activation),
        use_bias=use_bias,
        kernel_initializer=initializers.get(kernel_initializer),
        bias_initializer=initializers.get(bias_initializer),
        kernel_regularizer=regularizers.get(kernel_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        kernel_constraint=constraints.get(kernel_constraint),
        bias_constraint=constraints.get(bias_constraint),
        **kwargs)
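# Usage sketch (hedged): ComplexConv3D on a 5D complex input of shape
# (batch, depth, height, width, channels). The import path `cvnn.layers`
# is assumed; check it against your version of the library.
#
#   import tensorflow as tf
#   from cvnn.layers import ComplexConv3D
#
#   conv = ComplexConv3D(filters=4, kernel_size=3, padding='same')
#   x = tf.complex(tf.random.normal((2, 8, 8, 8, 1)),
#                  tf.random.normal((2, 8, 8, 8, 1)))
#   y = conv(x)  # expected shape (2, 8, 8, 8, 4), complex dtype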
def __init__(self, rank, filters, kernel_size, dtype, strides=1, padding='valid',
             data_format=None, dilation_rate=1, groups=1, activation=None,
             use_bias=True, kernel_initializer=ComplexGlorotUniform(),
             bias_initializer=Zeros(),
             kernel_regularizer=None, bias_regularizer=None,  # TODO: Not yet working
             activity_regularizer=None, kernel_constraint=None, bias_constraint=None,
             trainable=True, name=None, conv_op=None, **kwargs):
    if kernel_regularizer is not None or bias_regularizer is not None:
        logger.warning("Sorry, regularizers are not implemented yet; this parameter will have no effect")
    super(ComplexConv, self).__init__(
        trainable=trainable,
        name=name,
        activity_regularizer=regularizers.get(activity_regularizer),
        **kwargs)
    self.rank = rank
    # I use no default dtype to make sure I don't forget to give it to my ComplexConv layers
    self.my_dtype = tf.dtypes.as_dtype(dtype)
    if isinstance(filters, float):
        filters = int(filters)
    self.filters = filters
    self.groups = groups or 1
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = InputSpec(min_ndim=self.rank + 2)
    self._validate_init()
    self._is_causal = self.padding == 'causal'
    self._channels_first = self.data_format == 'channels_first'
    self._tf_data_format = conv_utils.convert_data_format(self.data_format, self.rank + 2)
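# Note (hedged): `conv_utils.normalize_tuple` is why the ComplexConv subclasses
# accept either a scalar or a per-dimension tuple for kernel_size, strides and
# dilation_rate. A quick illustration with the Keras utility this code imports:
#
#   from tensorflow.python.keras.utils import conv_utils
#   conv_utils.normalize_tuple(3, 2, 'kernel_size')       # -> (3, 3)
#   conv_utils.normalize_tuple((3, 5), 2, 'kernel_size')  # -> (3, 5)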
def build(self, input_shape):
    self.epsilon_matrix = tf.eye(2, dtype=self.my_dtype.real_dtype) * self.epsilon
    # Cast the negative indices to positive
    self.axis = [len(input_shape) + ax if ax < 0 else ax for ax in self.axis]
    self.used_axis = [ax for ax in range(0, len(input_shape)) if ax not in self.axis]
    desired_shape = [input_shape[ax] for ax in self.axis]
    if self.my_dtype.is_complex:
        self.gamma_r = tf.Variable(name='gamma_r',
                                   initial_value=self.gamma_initializer(shape=tuple(desired_shape),
                                                                        dtype=self.my_dtype),
                                   trainable=True)
        # I think I just need to scale with gamma, so by default I leave the imaginary part at zero
        self.gamma_i = tf.Variable(name='gamma_i',
                                   initial_value=Zeros()(shape=tuple(desired_shape), dtype=self.my_dtype),
                                   trainable=True)
        self.beta_r = tf.Variable(name="beta_r",
                                  initial_value=self.beta_initializer(shape=desired_shape,
                                                                      dtype=self.my_dtype),
                                  trainable=True)
        self.beta_i = tf.Variable(name="beta_i",
                                  initial_value=self.beta_initializer(shape=desired_shape,
                                                                      dtype=self.my_dtype),
                                  trainable=True)
        # tf.complex needs real-valued inputs, so use the real part of the dtype here
        self.moving_mean = tf.Variable(
            name='moving_mean',
            initial_value=tf.complex(
                real=self.moving_mean_initializer(shape=desired_shape, dtype=self.my_dtype.real_dtype),
                imag=self.moving_mean_initializer(shape=desired_shape, dtype=self.my_dtype.real_dtype)),
            trainable=False)
        # The 2x2 covariance of (real, imag) is real-valued; starting it at I / sqrt(2)
        # splits unit variance evenly between the real and imaginary parts
        self.moving_var = tf.Variable(
            name='moving_var',
            initial_value=tf.eye(2, dtype=self.my_dtype.real_dtype)
                          * self.moving_variance_initializer(shape=tuple(desired_shape) + (2, 2),
                                                             dtype=self.my_dtype.real_dtype)
                          / tf.math.sqrt(tf.constant(2., dtype=self.my_dtype.real_dtype)),
            trainable=False)
    else:
        self.gamma = tf.Variable(name='gamma',
                                 initial_value=self.gamma_initializer(shape=tuple(desired_shape),
                                                                      dtype=self.my_dtype),
                                 trainable=True)
        self.beta = tf.Variable(name="beta",
                                initial_value=self.beta_initializer(shape=desired_shape,
                                                                    dtype=self.my_dtype),
                                trainable=True)
        self.moving_mean = tf.Variable(name='moving_mean',
                                       initial_value=self.moving_mean_initializer(shape=desired_shape,
                                                                                  dtype=self.my_dtype),
                                       trainable=False)
        self.moving_var = tf.Variable(
            name='moving_var',
            initial_value=tf.eye(2, dtype=self.my_dtype)
                          * self.moving_variance_initializer(shape=tuple(desired_shape) + (2, 2),
                                                             dtype=self.my_dtype),
            trainable=False)
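# Quick check (hedged sketch): with desired_shape == [4], the complex-branch
# moving_var starts as identity 2x2 blocks scaled by 1/sqrt(2), i.e.
# Vrr = Vii = 1/sqrt(2) and Vri = Vir = 0 per feature -- the usual complex
# batch-norm initialization (cf. Trabelsi et al., 2018):
#
#   import tensorflow as tf
#   init = tf.eye(2) * tf.ones((4, 2, 2)) / tf.math.sqrt(2.)
#   print(init[0])  # [[0.7071, 0.], [0., 0.7071]]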
def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid',
             data_format=None, dilation_rate=(1, 1), groups=1, activation=None,
             use_bias=True, dtype=DEFAULT_COMPLEX_TYPE,
             kernel_initializer=ComplexGlorotUniform(), bias_initializer=Zeros(),
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, **kwargs):
    """
    :param filters: Integer, the dimensionality of the output space
        (i.e. the number of output filters in the convolution).
    :param kernel_size: An integer or tuple/list of 2 integers, specifying the height and width
        of the 2D convolution window.
        Can be a single integer to specify the same value for all spatial dimensions.
    :param strides: An integer or tuple/list of 2 integers, specifying the strides of the
        convolution along the height and width.
        Can be a single integer to specify the same value for all spatial dimensions.
        Specifying any stride value != 1 is incompatible with specifying any
        `dilation_rate` value != 1.
    :param padding: one of `"valid"` or `"same"` (case-insensitive).
        `"valid"` means no padding. `"same"` results in padding evenly to the left/right
        or up/down of the input such that the output has the same height/width dimension
        as the input.
    :param data_format: A string, one of `channels_last` (default) or `channels_first`.
        The ordering of the dimensions in the inputs.
        `channels_last` corresponds to inputs with shape `(batch_size, height, width, channels)`
        while `channels_first` corresponds to inputs with shape
        `(batch_size, channels, height, width)`.
        It defaults to the `image_data_format` value found in your Keras config file at
        `~/.keras/keras.json`. If you never set it, then it will be `channels_last`.
    :param dilation_rate: an integer or tuple/list of 2 integers, specifying the dilation rate
        to use for dilated convolution.
        Can be a single integer to specify the same value for all spatial dimensions.
        Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying
        any stride value != 1.
    :param groups: A positive integer specifying the number of groups in which the input is
        split along the channel axis. Each group is convolved separately with
        `filters / groups` filters. The output is the concatenation of all the `groups`
        results along the channel axis. Input channels and `filters` must both be
        divisible by `groups`.
    :param activation: Activation function to use. If you don't specify anything, no activation
        is applied. For complex :code:`dtype`, this must be from the :code:`cvnn.activations`
        module.
    :param use_bias: Boolean, whether the layer uses a bias vector.
    :param kernel_initializer: Initializer for the `kernel` weights matrix (see `keras.initializers`).
    :param bias_initializer: Initializer for the bias vector (see `keras.initializers`).
    :param kernel_regularizer: Regularizer function applied to the `kernel` weights matrix
        (see `keras.regularizers`).
    :param bias_regularizer: Regularizer function applied to the bias vector
        (see `keras.regularizers`).
    :param activity_regularizer: Regularizer function applied to the output of the layer
        (its "activation") (see `keras.regularizers`).
    :param kernel_constraint: Constraint function applied to the kernel matrix
        (see `keras.constraints`).
    :param bias_constraint: Constraint function applied to the bias vector
        (see `keras.constraints`).
    """
    super(ComplexConv2D, self).__init__(
        rank=2, dtype=dtype, filters=filters, kernel_size=kernel_size,
        strides=strides, padding=padding, data_format=data_format,
        dilation_rate=dilation_rate, groups=groups,
        activation=activations.get(activation),
        use_bias=use_bias,
        kernel_initializer=initializers.get(kernel_initializer),
        bias_initializer=initializers.get(bias_initializer),
        kernel_regularizer=regularizers.get(kernel_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        kernel_constraint=constraints.get(kernel_constraint),
        bias_constraint=constraints.get(bias_constraint),
        **kwargs)