Example #1
  def __init__(self,
               axis=-1,
               momentum=0.99,
               epsilon=1e-3,
               center=True,
               scale=True,
               beta_initializer='zeros',
               gamma_initializer='ones',
               moving_mean_initializer='zeros',
               moving_variance_initializer='ones',
               beta_regularizer=None,
               gamma_regularizer=None,
               beta_constraint=None,
               gamma_constraint=None,
               renorm=False,
               renorm_clipping=None,
               renorm_momentum=0.99,
               fused=None,
               trainable=True,
               virtual_batch_size=None,
               adjustment=None,
               name=None,
               **kwargs):
    super(BatchNormalization, self).__init__(
        name=name, trainable=trainable, **kwargs)
    if isinstance(axis, list):
      self.axis = axis[:]
    else:
      self.axis = axis
    self.momentum = momentum
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.moving_mean_initializer = initializers.get(moving_mean_initializer)
    self.moving_variance_initializer = initializers.get(
        moving_variance_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)
    self.renorm = renorm
    self.virtual_batch_size = virtual_batch_size
    self.adjustment = adjustment
    if fused is None:
      fused = True
    self.supports_masking = True

    self.fused = fused
    self._bessels_correction_test_only = True

    if renorm:
      renorm_clipping = renorm_clipping or {}
      keys = ['rmax', 'rmin', 'dmax']
      if set(renorm_clipping) - set(keys):
        raise ValueError('renorm_clipping %s contains keys not in %s' %
                         (renorm_clipping, keys))
      self.renorm_clipping = renorm_clipping
      self.renorm_momentum = renorm_momentum
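A minimal usage sketch, not part of the example above: it assumes the constructor matches the public tf.keras.layers.BatchNormalization API, and the argument values simply repeat the defaults shown in the signature.

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.Input(shape=(32, 32, 3)),
    tf.keras.layers.Conv2D(16, 3, padding='same', use_bias=False),
    # axis=-1 normalizes over the channel dimension; momentum and epsilon
    # mirror the defaults in the constructor shown above.
    tf.keras.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=1e-3),
    tf.keras.layers.ReLU(),
])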
Example #2
 def __init__(self,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
   super(LocallyConnected1D, self).__init__(**kwargs)
   self.filters = filters
   self.kernel_size = conv_utils.normalize_tuple(kernel_size, 1, 'kernel_size')
   self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
   self.padding = conv_utils.normalize_padding(padding)
   if self.padding != 'valid':
     raise ValueError('Invalid border mode for LocallyConnected1D '
                      '(only "valid" is supported): ' + padding)
   self.data_format = conv_utils.normalize_data_format(data_format)
   self.activation = activations.get(activation)
   self.use_bias = use_bias
   self.kernel_initializer = initializers.get(kernel_initializer)
   self.bias_initializer = initializers.get(bias_initializer)
   self.kernel_regularizer = regularizers.get(kernel_regularizer)
   self.bias_regularizer = regularizers.get(bias_regularizer)
   self.activity_regularizer = regularizers.get(activity_regularizer)
   self.kernel_constraint = constraints.get(kernel_constraint)
   self.bias_constraint = constraints.get(bias_constraint)
   self.input_spec = InputSpec(ndim=3)
Example #3
  def __init__(self,
               units,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
      kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    super(Dense, self).__init__(
        activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
    self.units = int(units)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.supports_masking = True
    self.input_spec = InputSpec(min_ndim=2)
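A short, hedged sketch of how the constructor above is typically used (assuming it is the standard tf.keras.layers.Dense); note how input_dim is rewritten into input_shape exactly as the first two lines of __init__ do.

import tensorflow as tf

layer = tf.keras.layers.Dense(
    units=64,
    activation='relu',
    kernel_regularizer=tf.keras.regularizers.l2(1e-4),
    input_dim=128,                      # becomes input_shape=(128,) in __init__
)
outputs = layer(tf.zeros((2, 128)))     # builds a (128, 64) kernel and a (64,) bias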
Example #4
  def __init__(self,
               axis=-1,
               epsilon=1e-3,
               center=True,
               scale=True,
               beta_initializer='zeros',
               gamma_initializer='ones',
               beta_regularizer=None,
               gamma_regularizer=None,
               beta_constraint=None,
               gamma_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    super(LayerNormalization, self).__init__(
        name=name, trainable=trainable, **kwargs)
    if isinstance(axis, (list, tuple)):
      self.axis = axis[:]
    elif isinstance(axis, int):
      self.axis = axis
    else:
      raise ValueError('Expected an int or a list/tuple of ints for the '
                       'argument \'axis\', but received instead: %s' % axis)

    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)

    self.supports_masking = True
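A minimal sketch, assuming the constructor above is tf.keras.layers.LayerNormalization; the values are illustrative only.

import tensorflow as tf

ln = tf.keras.layers.LayerNormalization(axis=-1, epsilon=1e-3)
x = tf.random.normal((4, 10, 512))
y = ln(x)          # each position is normalized to ~zero mean, unit variance over axis -1
print(y.shape)     # (4, 10, 512)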
Example #5
  def __init__(self,
               units,
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               time_major=False,
               **kwargs):
    super(RNN, self).__init__(**kwargs)  # pylint: disable=bad-super-call
    self.units = units
    cell_spec = collections.namedtuple('cell', ['state_size', 'output_size'])
    self.cell = cell_spec(
        state_size=(self.units, self.units), output_size=self.units)
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.unit_forget_bias = unit_forget_bias

    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)

    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.return_sequences = return_sequences
    self.return_state = return_state
    self.go_backwards = go_backwards
    self.stateful = stateful
    self.time_major = time_major
    self._num_constants = None
    self._num_inputs = None
    self._states = None
    self.input_spec = [InputSpec(ndim=3)]
    self.state_spec = [
        InputSpec(shape=(None, dim)) for dim in (self.units, self.units)
    ]
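The arguments above mirror the public LSTM layer. A hedged sketch using tf.keras.layers.LSTM (an assumption; the class in this example may be an internal variant sharing the same signature):

import tensorflow as tf

lstm = tf.keras.layers.LSTM(units=32, return_sequences=True, return_state=True)
x = tf.random.normal((8, 20, 16))   # (batch, time, features)
seq, h, c = lstm(x)                 # seq: (8, 20, 32); h and c: (8, 32)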
Example #6
  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format=None,
               dilation_rate=(1, 1),
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               **kwargs):
    super(ConvLSTM2DCell, self).__init__(**kwargs)
    self.filters = filters
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,
                                                    'dilation_rate')
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.use_bias = use_bias

    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.unit_forget_bias = unit_forget_bias

    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)

    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.dropout = min(1., max(0., dropout))
    self.recurrent_dropout = min(1., max(0., recurrent_dropout))
    self.state_size = (self.filters, self.filters)
    self._dropout_mask = None
    self._recurrent_dropout_mask = None
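The cell above is normally driven through the ConvLSTM2D layer. A minimal sketch, assuming the public tf.keras.layers.ConvLSTM2D wrapper, with illustrative shapes:

import tensorflow as tf

conv_lstm = tf.keras.layers.ConvLSTM2D(filters=8, kernel_size=(3, 3), padding='same')
x = tf.random.normal((2, 5, 28, 28, 1))   # (batch, time, rows, cols, channels)
y = conv_lstm(x)                          # (2, 28, 28, 8) when return_sequences=False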
Example #7
  def __init__(self,
               input_dim,
               output_dim,
               embeddings_initializer='uniform',
               embeddings_regularizer=None,
               activity_regularizer=None,
               embeddings_constraint=None,
               mask_zero=False,
               input_length=None,
               **kwargs):
    if 'input_shape' not in kwargs:
      if input_length:
        kwargs['input_shape'] = (input_length,)
      else:
        kwargs['input_shape'] = (None,)
    dtype = kwargs.pop('dtype', K.floatx())
    super(Embedding, self).__init__(dtype=dtype, **kwargs)

    self.input_dim = input_dim
    self.output_dim = output_dim
    self.embeddings_initializer = initializers.get(embeddings_initializer)
    self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.embeddings_constraint = constraints.get(embeddings_constraint)
    self.mask_zero = mask_zero
    self.supports_masking = mask_zero
    self.input_length = input_length
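A minimal sketch, assuming the constructor above is tf.keras.layers.Embedding; mask_zero=True reserves index 0 for padding and propagates a mask to downstream layers.

import tensorflow as tf

emb = tf.keras.layers.Embedding(input_dim=1000, output_dim=64, mask_zero=True)
ids = tf.constant([[3, 17, 0, 0], [5, 0, 0, 0]])
vectors = emb(ids)             # (2, 4, 64)
mask = emb.compute_mask(ids)   # True for real tokens, False where the id is 0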
Example #8
  def __init__(self,
               norm_axis=None,
               params_axis=-1,
               epsilon=1e-12,
               center=True,
               scale=True,
               beta_initializer='zeros',
               gamma_initializer='ones',
               beta_regularizer=None,
               gamma_regularizer=None,
               beta_constraint=None,
               gamma_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    super(LayerNormalization, self).__init__(
        name=name, trainable=trainable, **kwargs)
    if isinstance(norm_axis, list):
      self.norm_axis = norm_axis[:]
    elif isinstance(norm_axis, int):
      self.norm_axis = norm_axis
    elif norm_axis is None:
      self.norm_axis = None
    else:
      raise TypeError('norm_axis must be int or list or None, type given: %s'
                      % type(norm_axis))

    if isinstance(params_axis, list):
      self.params_axis = params_axis[:]
    elif isinstance(params_axis, int):
      self.params_axis = params_axis
    else:
      raise TypeError('params_axis must be int or list, type given: %s'
                      % type(params_axis))

    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)

    self.supports_masking = True
Example #9
  def __init__(self,
               units,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               **kwargs):
    self.units = units
    cell_spec = collections.namedtuple('cell', 'state_size')
    self._cell = cell_spec(state_size=(self.units, self.units))
    super(CuDNNLSTM, self).__init__(
        return_sequences=return_sequences,
        return_state=return_state,
        go_backwards=go_backwards,
        stateful=stateful,
        **kwargs)

    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.unit_forget_bias = unit_forget_bias

    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)

    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
Example #10
 def __init__(self,
              alpha_initializer='zeros',
              alpha_regularizer=None,
              alpha_constraint=None,
              shared_axes=None,
              **kwargs):
   super(PReLU, self).__init__(**kwargs)
   self.supports_masking = True
   self.alpha_initializer = initializers.get(alpha_initializer)
   self.alpha_regularizer = regularizers.get(alpha_regularizer)
   self.alpha_constraint = constraints.get(alpha_constraint)
   if shared_axes is None:
     self.shared_axes = None
   elif not isinstance(shared_axes, (list, tuple)):
     self.shared_axes = [shared_axes]
   else:
     self.shared_axes = list(shared_axes)
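A minimal sketch, assuming the constructor above is the standard tf.keras.layers.PReLU; shared_axes ties the learned alpha across the spatial dimensions so only one slope per channel is created.

import tensorflow as tf

prelu = tf.keras.layers.PReLU(shared_axes=[1, 2])
y = prelu(tf.random.normal((1, 32, 32, 16)))
print(prelu.alpha.shape)   # (1, 1, 16): one slope per channel, shared over height and width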
Example #11
    def __init__(self,
                 axis=-1,
                 momentum=0.99,
                 epsilon=1e-3,
                 center=True,
                 scale=True,
                 beta_initializer='zeros',
                 gamma_initializer='ones',
                 moving_mean_initializer='zeros',
                 moving_variance_initializer='ones',
                 beta_regularizer=None,
                 gamma_regularizer=None,
                 beta_constraint=None,
                 gamma_constraint=None,
                 renorm=False,
                 renorm_clipping=None,
                 renorm_momentum=0.99,
                 fused=None,
                 trainable=True,
                 virtual_batch_size=None,
                 adjustment=None,
                 name=None,
                 **kwargs):
        super(BatchNormalization, self).__init__(name=name,
                                                 trainable=trainable,
                                                 **kwargs)
        if isinstance(axis, list):
            self.axis = axis[:]
        else:
            self.axis = axis
        self.momentum = momentum
        self.epsilon = epsilon
        self.center = center
        self.scale = scale
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.moving_mean_initializer = initializers.get(
            moving_mean_initializer)
        self.moving_variance_initializer = initializers.get(
            moving_variance_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_constraint = constraints.get(gamma_constraint)
        self.renorm = renorm
        self.virtual_batch_size = virtual_batch_size
        self.adjustment = adjustment
        if fused is None:
            fused = True
        self.supports_masking = True

        self.fused = fused
        self._bessels_correction_test_only = True

        if renorm:
            renorm_clipping = renorm_clipping or {}
            keys = ['rmax', 'rmin', 'dmax']
            if set(renorm_clipping) - set(keys):
                raise ValueError('renorm_clipping %s contains keys not in %s' %
                                 (renorm_clipping, keys))
            self.renorm_clipping = renorm_clipping
            self.renorm_momentum = renorm_momentum
Example #12
    def __init__(self,
                 filters,
                 kernel_size,
                 octave=2,
                 ratio_out=0.5,
                 strides=1,
                 dilation_rate=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(OctaveConv1D, self).__init__(**kwargs)
        self.filters = filters
        self.kernel_size = kernel_size
        self.octave = octave
        self.ratio_out = ratio_out
        self.strides = strides
        self.dilation_rate = dilation_rate
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.filters_low = int(filters * self.ratio_out)
        self.filters_high = filters - self.filters_low

        self.conv_high_to_high, self.conv_low_to_high = None, None
        if self.filters_high > 0:
            self.conv_high_to_high = self._init_conv(
                self.filters_high, name='{}-Conv1D-HH'.format(self.name))
            self.conv_low_to_high = self._init_conv(self.filters_high,
                                                    name='{}-Conv1D-LH'.format(
                                                        self.name))
        self.conv_low_to_low, self.conv_high_to_low = None, None
        if self.filters_low > 0:
            self.conv_low_to_low = self._init_conv(self.filters_low,
                                                   name='{}-Conv1D-HL'.format(
                                                       self.name))
            self.conv_high_to_low = self._init_conv(self.filters_low,
                                                    name='{}-Conv1D-LL'.format(
                                                        self.name))
        self.pooling = AveragePooling1D(
            pool_size=self.octave,
            padding='valid',
            name='{}-AveragePooling1D'.format(self.name),
        )
        self.up_sampling = UpSampling1D(
            size=self.octave,
            name='{}-UpSampling1D'.format(self.name),
        )
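A hedged instantiation sketch for the custom OctaveConv1D layer defined above; every argument used here appears in its constructor, and the values are illustrative only.

octave_conv = OctaveConv1D(
    filters=16,
    kernel_size=3,
    octave=2,        # pooling/upsampling factor between the two frequency branches
    ratio_out=0.5,   # half of the filters go to the low-frequency output
)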
Example #13
  def __init__(self,
               axis=-1,
               momentum=0.99,
               epsilon=1e-3,
               center=True,
               scale=True,
               beta_initializer='zeros',
               gamma_initializer='ones',
               moving_mean_initializer='zeros',
               moving_variance_initializer='ones',
               beta_regularizer=None,
               gamma_regularizer=None,
               beta_constraint=None,
               gamma_constraint=None,
               renorm=False,
               renorm_clipping=None,
               renorm_momentum=0.99,
               fused=None,
               trainable=True,
               virtual_batch_size=None,
               adjustment=None,
               name=None,
               **kwargs):
    super(BatchNormalizationBase, self).__init__(
        name=name, trainable=trainable, **kwargs)
    if isinstance(axis, list):
      self.axis = axis[:]
    elif isinstance(axis, int):
      self.axis = axis
    else:
      raise TypeError('axis must be int or list, type given: %s'
                      % type(axis))
    self.momentum = momentum
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.moving_mean_initializer = initializers.get(moving_mean_initializer)
    self.moving_variance_initializer = initializers.get(
        moving_variance_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)
    self.renorm = renorm
    self.virtual_batch_size = virtual_batch_size
    self.adjustment = adjustment
    if self._USE_V2_BEHAVIOR:
      if fused:
        self._raise_if_fused_cannot_be_used()
      # We leave fused as None if self._fused_can_be_used()==True, since we
      # still may set it to False in self.build() if the input rank is not 4.
      elif fused is None and not self._fused_can_be_used():
        fused = False
    elif fused is None:
      fused = True
    self.supports_masking = True

    self.fused = fused
    self._bessels_correction_test_only = True

    if renorm:
      renorm_clipping = renorm_clipping or {}
      keys = ['rmax', 'rmin', 'dmax']
      if set(renorm_clipping) - set(keys):
        raise ValueError('renorm_clipping %s contains keys not in %s' %
                         (renorm_clipping, keys))
      self.renorm_clipping = renorm_clipping
      self.renorm_momentum = renorm_momentum
Example #14
    def __init__(
            self,
            feature_units,
            attn_heads=1,
            attn_heads_reduction="concat",  # {"concat", "average"}
            dropout_rate=0.5,
            activation="relu",
            use_bias=True,
            kernel_initializer="glorot_uniform",
            bias_initializer="zeros",
            attn_kernel_initializer="glorot_uniform",
            kernel_regularizer=None,
            bias_regularizer=None,
            attn_kernel_regularizer=None,
            activity_regularizer=None,
            kernel_constraint=None,
            bias_constraint=None,
            attn_kernel_constraint=None,
            attention=True,
            **kwargs):

        if attn_heads_reduction not in {"concat", "average"}:
            raise ValueError("Possbile reduction methods: concat, average")

        self.F_ = feature_units  # Number of output features (F' in the paper)
        self.attn_heads = attn_heads  # Number of attention heads (K in the paper)
        self.attn_heads_reduction = attn_heads_reduction  # Eq. 5 and 6 in the paper
        self.dropout_rate = dropout_rate  # Internal dropout rate
        self.activation = activations.get(activation)  # Eq. 4 in the paper
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.attn_kernel_initializer = initializers.get(
            attn_kernel_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.attn_kernel_regularizer = regularizers.get(
            attn_kernel_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.attn_kernel_constraint = constraints.get(attn_kernel_constraint)
        self.supports_masking = False
        self.attention = attention

        # Populated by build()
        self.kernels = []  # Layer kernels for attention heads
        self.biases = []  # Layer biases for attention heads
        self.attn_kernels = []  # Attention kernels for attention heads

        if attn_heads_reduction == "concat":
            # Output will have shape (..., K * F')
            self.output_dim = self.F_ * self.attn_heads
        else:
            # Output will have shape (..., F')
            self.output_dim = self.F_

        super(GraphAttentionLayer, self).__init__(**kwargs)
Example #15
    def __init__(self,
                 time_scale,
                 size_per_head,
                 num_attention_heads,
                 num_memory_slots,
                 use_relative_position=True,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 mlp_activation='gelu',
                 forget_bias=1.0,
                 input_bias=0.0,
                 sigma_bias=-4.6,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 attention_initializer='truncated_normal',
                 mlp_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 attention_regularizer=None,
                 mlp_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 attention_constraint=None,
                 mlp_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 **kwargs):
        super(TmpHierRMCRNNCell, self).__init__(**kwargs)
        self.time_scale = time_scale
        self.units = num_attention_heads * size_per_head
        self.num_memory_slots = num_memory_slots
        self.num_attention_heads = num_attention_heads
        self.size_per_head = size_per_head
        self.use_relative_position = use_relative_position

        self.activation = wrap_activations_get(activation)
        self.recurrent_activation = wrap_activations_get(recurrent_activation)
        self.mlp_activation = wrap_activations_get(mlp_activation)
        self.forget_bias = forget_bias
        self.input_bias = input_bias
        self.sigma_bias = sigma_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.attention_initializer = initializers.get(attention_initializer)
        self.mlp_initializer = initializers.get(mlp_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.attention_regularizer = regularizers.get(attention_regularizer)
        self.mlp_regularizer = regularizers.get(mlp_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.attention_constraint = constraints.get(attention_constraint)
        self.mlp_constraint = constraints.get(mlp_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        # tuple(_ListWrapper) was silently dropping list content in at least
        # Python 2.7.10 and was fixed after 2.7.16. The state_size is kept as a
        # wrapper around NoDependency() so that base_layer.__setattr__ will not
        # convert it to a ListWrapper. Downstream, self.states will be a list
        # since it is generated from nest.map_structure with a list, and
        # tuple(list) will work properly.
        self.state_size = [
            1, 2 * (4 + self.num_memory_slots) * self.units]
        self.output_size = self.units
Example #16
    def __init__(self,
                 units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 attention_activation='tanh',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 attention_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 attention_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 attention_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 return_attention=False,
                 implementation=1,
                 **kwargs):
        super(AttentionLSTMCell, self).__init__(**kwargs)
        self.input_spec = [InputSpec(ndim=3)]
        self.units = units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.attention_activation = activations.get(attention_activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.attention_initializer = initializers.get(attention_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.attention_regularizer = regularizers.get(attention_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.attention_constraint = constraints.get(attention_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.return_attention = return_attention
        self._dropout_mask = None
        self._recurrent_dropout_mask = None
        self.implementation = implementation
        self.state_spec = [
            InputSpec(shape=(None, self.units)),
            InputSpec(shape=(None, self.units))
        ]
        self.state_size = (self.units, self.units)
Example #17
    def __init__(self,
                 axis=-1,
                 momentum=0.99,
                 epsilon=1e-3,
                 center=True,
                 scale=True,
                 beta_initializer='zeros',
                 gamma_initializer='ones',
                 moving_mean_initializer='zeros',
                 moving_variance_initializer='ones',
                 beta_regularizer=None,
                 gamma_regularizer=None,
                 beta_constraint=None,
                 gamma_constraint=None,
                 renorm=False,
                 renorm_clipping=None,
                 renorm_momentum=0.99,
                 fused=None,
                 trainable=True,
                 virtual_batch_size=None,
                 adjustment=None,
                 name=None,
                 **kwargs):
        super(BatchNormalizationV2, self).__init__(name=name,
                                                   trainable=trainable,
                                                   **kwargs)
        if isinstance(axis, list):
            self.axis = axis[:]
        elif isinstance(axis, int):
            self.axis = axis
        else:
            raise TypeError('axis must be int or list, type given: %s' %
                            type(axis))
        self.momentum = momentum
        self.epsilon = epsilon
        self.center = center
        self.scale = scale
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.moving_mean_initializer = initializers.get(
            moving_mean_initializer)
        self.moving_variance_initializer = initializers.get(
            moving_variance_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_constraint = constraints.get(gamma_constraint)
        self.renorm = renorm
        self.virtual_batch_size = virtual_batch_size
        self.adjustment = adjustment
        if self._USE_V2_BEHAVIOR:
            if fused:
                self._raise_if_fused_cannot_be_used()
            # We leave fused as None if self._fused_can_be_used()==True, since we
            # still may set it to False in self.build() if the input rank is not 4.
            elif fused is None and not self._fused_can_be_used():
                fused = False
        elif fused is None:
            fused = True
        self.supports_masking = True

        self.fused = fused
        self._bessels_correction_test_only = True

        if renorm:
            renorm_clipping = renorm_clipping or {}
            keys = ['rmax', 'rmin', 'dmax']
            if set(renorm_clipping) - set(keys):
                raise ValueError('renorm_clipping %s contains keys not in %s' %
                                 (renorm_clipping, keys))
            self.renorm_clipping = renorm_clipping
            self.renorm_momentum = renorm_momentum
Example #18
    def __init__(self,
                 n_slots,
                 n_heads,
                 head_size,
                 n_blocks=1,
                 n_layers=3,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 forget_bias=1.0,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        """Initialization method.

        Args:
            n_slots (int): Number of memory slots.
            n_heads (int): Number of attention heads.
            head_size (int): Size of each attention head.
            n_blocks (int): Number of feed-forward networks.
            n_layers (int): Amount of layers per feed-forward network.
            activation (str): Output activation function.
            recurrent_activation (str): Recurrent step activation function.
            forget_bias (float): Forget gate bias values.
            kernel_initializer (str): Kernel initializer function.
            recurrent_initializer (str): Recurrent kernel initializer function.
            bias_initializer (str): Bias initializer function.
            kernel_regularizer (str): Kernel regularizer function.
            recurrent_regularizer (str): Recurrent kernel regularizer function.
            bias_regularizer (str): Bias regularizer function.
            kernel_constraint (str): Kernel constraint function.
            recurrent_constraint (str): Recurrent kernel constraint function.
            bias_constraint (str): Bias constraint function.

        """

        # Overrides its parent class with any custom arguments if needed
        super(RelationalMemoryCell, self).__init__(**kwargs)

        # Number of memory slots and their sizes
        self.n_slots = n_slots
        self.slot_size = n_heads * head_size

        # Number of attention heads and their sizes
        self.n_heads = n_heads
        self.head_size = head_size

        # Number of feed-forward network blocks and their sizes
        self.n_blocks = n_blocks
        self.n_layers = n_layers

        # Activation functions
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)

        # Forget gate bias value
        self.forget_bias = forget_bias

        # `W`, `U` and `b` initializers
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        # `W`, `U` and `b` regularizers
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        # `W`, `U` and `b` constraints
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        # Number of outputted units
        self.units = self.slot_size * n_slots

        # Number of outputted units from the gates
        self.n_gates = 2 * self.slot_size

        # Creating a layer for projecting the input
        self.projector = Dense(self.slot_size)

        # Creating the feed-forward network
        # It is composed by linear layers and normalization ones
        self.before_norm = LayerNormalization()
        self.linear = [
            Dense(self.slot_size, activation='relu') for _ in range(n_layers)
        ]
        self.after_norm = LayerNormalization()

        # Creating the Multi-Head Attention layer
        self.attn = MultiHeadAttention(self.slot_size, self.n_heads)
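The cell above is meant to be unrolled by a recurrent wrapper. A hedged sketch (assuming the class implements the standard Keras cell contract, i.e. call() and state_size, which are not shown here):

import tensorflow as tf

cell = RelationalMemoryCell(n_slots=4, n_heads=4, head_size=16)
layer = tf.keras.layers.RNN(cell)   # unrolls the cell over the time axis of its input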
Example #19
    def __init__(self,
                 low_bound,
                 sup_bound,
                 with_sum='n',
                 a_initializer='ones',
                 a_regularizer=None,
                 a_constraint=None,
                 b_initializer='zeros',
                 b_regularizer=None,
                 b_constraint=None,
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
          kwargs['input_shape'] = (kwargs.pop('input_dim'),)

        super(Restrict, self).__init__(
            activity_regularizer=None, **kwargs)
        try:
            low_bound, sup_bound = float(low_bound), float(sup_bound)
        except TypeError:
            pass
        check_l, check_s = isinstance(low_bound, float), isinstance(sup_bound, float)
        if (check_l and (not check_s)) or (check_s and (not check_l)):
            raise TypeError('The input `low_bound` and `sup_bound` do not share the same float type.')
        elif check_l and check_s:
            self.arg_array = False
        else:
            check_l, check_s = isinstance(low_bound, (list, tuple)), isinstance(sup_bound, (list, tuple))
            if (check_l and (not check_s)) or (check_s and (not check_l)):
                raise TypeError('The input `low_bound` and `sup_bound` do not share the same list/tuple type.')
            if len(low_bound) != len(sup_bound):
                raise TypeError('The input `low_bound` and `sup_bound` do not share the same length.')
            for l,s in zip(low_bound, sup_bound):
                if l >= s:
                    raise ValueError('The input `low_bound` should be less than `sup_bound`, but received ' + 
                                     str(low_bound) + ' ' + str(sup_bound))
            if with_sum == 'i' and len(low_bound) > 1:
                for l in low_bound[1:]:
                    if l < 0:
                        raise ValueError('In increasing mode, each element of `low_bound[1:]` should be '
                                         'non-negative, but received ' + str(low_bound))
            elif with_sum == 'd' and len(low_bound) > 1:
                for l in low_bound[:-1]:
                    if l < 0:
                        raise ValueError('In decreasing mode, each element of `low_bound[:-1]` should be '
                                         'non-negative, but received ' + str(low_bound))
            if check_l and check_s:
                self.arg_array = True
            else:
                raise TypeError('At least one of `low_bound` and `sup_bound` has an unsupported type.')
            low_bound = np.array(low_bound, dtype=float)
            sup_bound = np.array(sup_bound, dtype=float)
        self.low_bound = low_bound
        self.sup_bound = sup_bound
        if with_sum not in ('i', 'd', 'n'):
            raise ValueError('The input `with_sum` only supports 3 modes: i, d, n; but received ' + str(with_sum))
        self.with_sum = with_sum
        self.a_initializer = initializers.get(a_initializer)
        self.a_regularizer = regularizers.get(a_regularizer)
        self.a_constraint = constraints.get(a_constraint)
        self.b_initializer = initializers.get(b_initializer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.b_constraint = constraints.get(b_constraint)
        self.supports_masking = True
Example #20
    def __init__(self,
                 units,
                 projection_units,
                 use_feedback=True,
                 use_recurrent=True,
                 activation='tanh',
                 projection_activation='sigmoid',
                 use_bias=True,
                 use_projection_bias=True,
                 kernel_initializer='glorot_uniform',
                 projection_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 feedback_initializer='orthogonal',
                 bias_initializer='zeros',
                 projection_bias_initializer='zeros',
                 kernel_regularizer=None,
                 projection_regularizer=None,
                 recurrent_regularizer=None,
                 feedback_regularizer=None,
                 bias_regularizer=None,
                 projection_bias_regularizer=None,
                 kernel_constraint=None,
                 projection_constraint=None,
                 recurrent_constraint=None,
                 feedback_constraint=None,
                 bias_constraint=None,
                 projection_bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 **kwargs):
        super(Cell, self).__init__(**kwargs)
        self.units = units
        self.projection_units = projection_units
        self.use_feedback = use_feedback
        self.use_recurrent = use_recurrent
        self.activation = activations.get(activation)
        self.projection_activation = activations.get(projection_activation)
        self.use_bias = use_bias
        self.use_projection_bias = use_projection_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.projection_initializer = initializers.get(projection_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.feedback_initializer = initializers.get(feedback_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.projection_bias_initializer = initializers.get(
            projection_bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.projection_regularizer = regularizers.get(projection_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.feedback_regularizer = regularizers.get(feedback_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.projection_bias_regularizer = regularizers.get(
            projection_bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.projection_constraint = constraints.get(projection_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.feedback_constraint = constraints.get(feedback_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.projection_bias_constraint = constraints.get(
            projection_bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))

        self.state_size = self.units
        self.output_size = self.units
        self.projection_size = self.projection_units
Example #21
    def __init__(self,
                 rank,
                 filters,
                 kernel_size,
                 param_reduction=0.5,
                 form='diagonal',
                 strides=1,
                 padding='valid',
                 data_format=None,
                 dilation_rate=1,
                 groups=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 name=None,
                 conv_op=None,
                 **kwargs):
        super(SzConv, self).__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)
        self.rank = rank

        if isinstance(filters, float):
            filters = int(filters)
        self.filters = filters
        self.groups = groups or 1
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                      'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, 'dilation_rate')

        self.activation = activations.get(activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=self.rank + 2)

        self._validate_init()
        self._is_causal = self.padding == 'causal'
        self._channels_first = self.data_format == 'channels_first'
        self._tf_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)

        self.reduction_sv = param_reduction
        self.form = form
        self.num_ones = 0
        self.num_weights = 0
        self.reduced_ratio = 0
        self.halfbandwidth = 0
Example #22
    def __init__(
            self,
            units,
            num_layers,
            extract_every_n_layers=3,  # Save every n-th layer's output and return them concatenated
            activation=None,
            use_bias=True,
            scale=True,
            scf_min=0.2,
            scf_max=2.0,
            dropconnect_prob=0.00,
            dropout_prob=0.05,
            kernel_initializer='glorot_uniform',
            bias_initializer='zeros',
            kernel_regularizer=None,
            bias_regularizer=None,
            activity_regularizer=None,
            kernel_constraint=None,
            bias_constraint=None,
            **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )

        super(DensePipe, self).__init__(
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)

        # Save params
        self.units = units
        self.num_layers = num_layers
        self.extract_every_n_layers = extract_every_n_layers
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.dropconnect_prob = dropconnect_prob
        self.dropout_prob = dropout_prob
        self.scale = scale
        self.scf_min = scf_min
        self.scf_max = scf_max
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.kwargs = kwargs

        # Create Dense layers
        self.dense_layers = [
            ScaledLinear(units=self.units,
                         activation=self.activation,
                         scf_min=self.scf_min,
                         scf_max=self.scf_max,
                         scale=self.scale,
                         dropconnect_prob=self.dropconnect_prob,
                         kernel_initializer=self.kernel_initializer,
                         bias_initializer=self.bias_initializer,
                         kernel_regularizer=self.kernel_regularizer,
                         bias_regularizer=self.bias_regularizer,
                         kernel_constraint=self.kernel_constraint,
                         bias_constraint=self.bias_constraint,
                         name='dense') for layer in range(self.num_layers)
        ]

        self.dropout = tf.keras.layers.Dropout(self.dropout_prob)
Example #23
    def __init__(
            self,
            rank: int,
            head_size: int,
            head_count: int,
            kernel_size: Union[int, Tuple, List],
            strides: Union[int, Tuple, List],
            # data_format: Optional[AnyStr],
            dilation_rate: Union[int, Tuple, List],
            activation: Optional[Union[AnyStr, Callable]],
            use_bias: bool,
            kernel_initializer: Optional[Union[Dict, AnyStr, Callable]],
            bias_initializer: Optional[Union[Dict, AnyStr, Callable]],
            embeddings_initializer: Optional[Union[Dict, AnyStr, Callable]],
            kernel_regularizer: Optional[Union[Dict, AnyStr, Callable]],
            bias_regularizer: Optional[Union[Dict, AnyStr, Callable]],
            activity_regularizer: Optional[Union[Dict, AnyStr, Callable]],
            kernel_constraint: Optional[Union[Dict, AnyStr, Callable]],
            bias_constraint: Optional[Union[Dict, AnyStr, Callable]],
            trainable=True,
            name=None,
            **kwargs):
        activity_regularizer = regularizers.get(activity_regularizer)
        super(StandAloneSelfAttention,
              self).__init__(trainable=trainable,
                             name=name,
                             activity_regularizer=activity_regularizer,
                             **kwargs)

        # region Utils (normalizing tuples, data format and getting initializers/regularizers/constraints)
        kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                 "kernel_size")
        strides = conv_utils.normalize_tuple(strides, rank, "strides")
        # data_format = conv_utils.normalize_data_format(data_format)
        dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank,
                                                   "dilation_rate")
        activation = activations.get(activation)
        kernel_initializer = initializers.get(kernel_initializer)
        bias_initializer = initializers.get(bias_initializer)
        if embeddings_initializer == "random_normal":
            embeddings_initializer = initializers.initializers_v2.RandomNormal(
                stddev=1.0)
        embeddings_initializer = initializers.get(embeddings_initializer)
        kernel_regularizer = regularizers.get(kernel_regularizer)
        bias_regularizer = regularizers.get(bias_regularizer)
        kernel_constraint = constraints.get(kernel_constraint)
        bias_constraint = constraints.get(bias_constraint)
        # endregion

        # region Base attributes
        self.rank = rank
        self.head_size = head_size
        self.head_count = head_count
        self.kernel_size = kernel_size
        self.strides = strides
        # self.data_format = data_format
        self.dilation_rate = dilation_rate
        self.activation = activation
        self.use_bias = use_bias
        self.kernel_initializer = kernel_initializer
        self.bias_initializer = bias_initializer
        self.embeddings_initializer = embeddings_initializer
        self.kernel_regularizer = kernel_regularizer
        self.bias_regularizer = bias_regularizer
        self.kernel_constraint = kernel_constraint
        self.bias_constraint = bias_constraint
        # endregion

        # region Queries/Keys/Values conv layers
        common_parameters = {
            "rank": self.rank,
            "filters": self.filters,
            "kernel_size": 1,
            "use_bias": self.use_bias,
            "kernel_initializer": self.kernel_initializer,
            "bias_initializer": self.bias_initializer,
            "kernel_regularizer": self.kernel_regularizer,
            "activity_regularizer": self.activity_regularizer,
            "kernel_constraint": self.kernel_constraint,
            "bias_constraint": self.bias_constraint,
        }
        self.queries_layer = Conv(name="Conv_Queries{}".format(self.name),
                                  **common_parameters)
        self.keys_layer = Conv(name="Conv_Keys{}".format(self.name),
                               **common_parameters)
        self.values_layer = Conv(name="Conv_Values{}".format(self.name),
                                 **common_parameters)
        # endregion

        # region Queries/Keys/Values unfold layers
        self.queries_unfold = Unfold(kernel_size=1,
                                     strides=strides,
                                     name="Unfold_Queries_{}".format(
                                         self.name))
        self.keys_unfold = Unfold(kernel_size=kernel_size,
                                  strides=strides,
                                  dilation_rate=dilation_rate,
                                  padding="SAME",
                                  name="Unfold_Keys_{}".format(self.name))
        self.values_unfold = Unfold(kernel_size=kernel_size,
                                    strides=strides,
                                    dilation_rate=dilation_rate,
                                    padding="SAME",
                                    name="Unfold_Values_{}".format(self.name))
        # endregion

        # region Time/Height/Width embeddings
        conv_embeddings = []
        for i in range(rank):
            dim_embeddings_size = self.filters // rank
            if i == 0:
                dim_embeddings_size += self.filters % rank

            dim_embeddings_shape = (dim_embeddings_size, *[1] * i,
                                    kernel_size[i], *[1] * (rank - i - 1))
            dim_embeddings = self.add_weight(
                name="dim_{}_embeddings".format(i + 1),
                shape=dim_embeddings_shape,
                dtype=tf.float32,
                initializer=self.embeddings_initializer)
            conv_embeddings.append(dim_embeddings)
        self.conv_embeddings = conv_embeddings
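The loop above gives each spatial dimension its own relative-position embedding: the output filters are split evenly across the rank dimensions (the first dimension absorbs the remainder), and each embedding spans only that dimension's kernel extent. A minimal sketch of the resulting shapes, assuming illustrative values rank=2, filters=64 and kernel_size=(7, 7):

# Sketch only: reproduces the shape computation above with assumed example values.
rank, filters, kernel_size = 2, 64, (7, 7)
for i in range(rank):
    dim_embeddings_size = filters // rank
    if i == 0:
        dim_embeddings_size += filters % rank  # first dimension absorbs the remainder
    shape = (dim_embeddings_size, *[1] * i, kernel_size[i], *[1] * (rank - i - 1))
    print(i + 1, shape)   # -> 1 (32, 7, 1)   2 (32, 1, 7)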
Beispiel #24
0
    def __init__(self,
                 rank,
                 lgroups,
                 lfilters,
                 kernel_size,
                 strides=1,
                 padding='valid',
                 data_format=None,
                 dilation_rate=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 name=None,
                 **kwargs):
        super(_GroupConv, self).__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)
        self.rank = rank
        if rank > 2:
            raise ValueError(
                'The quick group convolution does not support 3D or any higher dimension.'
            )
        initRank = rank
        self.lgroups = lgroups
        self.lfilters = lfilters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                      'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        if (self.padding == 'causal'
                and not isinstance(self, (Conv1D, SeparableConv1D))):
            raise ValueError(
                'Causal padding is only supported for `Conv1D` and '
                '`SeparableConv1D`.'
            )
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, 'dilation_rate')
        if rank == 1:  # when rank=1, expand the tuples to 2D case.
            self.kernel_size = (1, *self.kernel_size)
            self.strides = (1, *self.strides)
            self.dilation_rate = (1, *self.dilation_rate)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=self.rank + 2)

        self.group_input_dim = None
        self.exp_dim_pos = None
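For reference, the effect of a grouped convolution like the one _GroupConv implements can be sketched with stock Keras layers: the channels are split into lgroups groups, each group is convolved with its own lfilters filters, and the results are concatenated along the channel axis. The values below are illustrative, not taken from the class above.

# Sketch only (assumed example values); requires TensorFlow.
import tensorflow as tf

lgroups, lfilters = 4, 8
x = tf.random.normal((1, 32, 32, 16))
splits = tf.split(x, lgroups, axis=-1)                                # 4 groups of 4 channels
convs = [tf.keras.layers.Conv2D(lfilters, 3, padding="same")(s) for s in splits]
y = tf.concat(convs, axis=-1)                                         # (1, 32, 32, lgroups * lfilters)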
Beispiel #25
0
    def __init__(self,
                 filters,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 dilation_rate=(1, 1),
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(Conv2D, self).__init__(
            #rank=2,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activations.get(activation),
            use_bias=use_bias,
            kernel_initializer=initializers.get(kernel_initializer),
            bias_initializer=initializers.get(bias_initializer),
            kernel_regularizer=regularizers.get(kernel_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            kernel_constraint=constraints.get(kernel_constraint),
            bias_constraint=constraints.get(bias_constraint),
            **kwargs)

    def call(self, inputs):
        # Normalise the kernel by its L2 norm (with a small epsilon for
        # numerical stability) before applying the convolution.
        weights = self.kernel
        d = tf.math.sqrt(
            tf.math.reduce_sum(
                tf.math.square(weights), axis=[1, 2, 3], keepdims=True) +
            1e-8)
        weights = weights / d

        outputs = self._convolution_op(inputs, weights)

        if self.use_bias:
            if self.data_format == 'channels_first':
                if self.rank == 1:
                    # nn.bias_add does not accept a 1D input tensor.
                    bias = array_ops.reshape(self.bias,
                                             (1, self.filters, 1))
                    outputs += bias
                else:
                    outputs = nn.bias_add(outputs,
                                          self.bias,
                                          data_format='NCHW')
            else:
                outputs = nn.bias_add(outputs,
                                      self.bias,
                                      data_format='NHWC')

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Beispiel #26
0
 def __init__(self,
              filters,
              kernel_size,
              rank=2,
              strides=(1, 1),
              padding='valid',
              data_format=None,
              dilation_rate=(1, 1),
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              trainable=True,
              name=None,
              lambda_l1=None,
              lambda_mask=None,
              shared=None,
              adaptive=None,
              from_kb=None,
              atten=None,
              mask=None,
              bias=None,
              **kwargs):
     super(DecomposedConv, self).__init__(
         trainable=trainable,
         name=name,
         activity_regularizer=regularizers.get(activity_regularizer),
         **kwargs)
     self.rank = rank
     self.filters = filters
     self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                   'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
     if (self.padding == 'causal'
             and not isinstance(self, (Conv1D, SeparableConv1D))):
         raise ValueError('Causal padding is only supported for `Conv1D` '
                          'and `SeparableConv1D`.')
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.dilation_rate = conv_utils.normalize_tuple(
         dilation_rate, rank, 'dilation_rate')
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(ndim=self.rank + 2)
     self.sw = shared
     self.aw = adaptive
     self.mask = mask
     self.bias = bias
     self.aw_kb = from_kb
     self.atten = atten
     self.lambda_l1 = lambda_l1
     self.lambda_mask = lambda_mask
Beispiel #27
0
    def __init__(self,
                 units,
                 num_experts,
                 num_tasks,
                 use_expert_bias=True,
                 use_gate_bias=True,
                 expert_activation='relu',
                 gate_activation='softmax',
                 expert_bias_initializer='zeros',
                 gate_bias_initializer='zeros',
                 expert_bias_regularizer=None,
                 gate_bias_regularizer=None,
                 expert_bias_constraint=None,
                 gate_bias_constraint=None,
                 expert_kernel_initializer='VarianceScaling',
                 gate_kernel_initializer='VarianceScaling',
                 expert_kernel_regularizer=None,
                 gate_kernel_regularizer=None,
                 expert_kernel_constraint=None,
                 gate_kernel_constraint=None,
                 activity_regularizer=None,
                 **kwargs):
        """
        Method for instantiating the MMoE (Multi-gate Mixture-of-Experts) layer.

        :param units: Number of hidden units
        :param num_experts: Number of experts
        :param num_tasks: Number of tasks
        :param use_expert_bias: Boolean to indicate the usage of bias in the expert weights
        :param use_gate_bias: Boolean to indicate the usage of bias in the gate weights
        :param expert_activation: Activation function of the expert weights
        :param gate_activation: Activation function of the gate weights
        :param expert_bias_initializer: Initializer for the expert bias
        :param gate_bias_initializer: Initializer for the gate bias
        :param expert_bias_regularizer: Regularizer for the expert bias
        :param gate_bias_regularizer: Regularizer for the gate bias
        :param expert_bias_constraint: Constraint for the expert bias
        :param gate_bias_constraint: Constraint for the gate bias
        :param expert_kernel_initializer: Initializer for the expert weights
        :param gate_kernel_initializer: Initializer for the gate weights
        :param expert_kernel_regularizer: Regularizer for the expert weights
        :param gate_kernel_regularizer: Regularizer for the gate weights
        :param expert_kernel_constraint: Constraint for the expert weights
        :param gate_kernel_constraint: Constraint for the gate weights
        :param activity_regularizer: Regularizer for the activity
        :param kwargs: Additional keyword arguments for the Layer class
        """
        # Hidden nodes parameter
        self.units = units
        self.num_experts = num_experts
        self.num_tasks = num_tasks

        # Weight parameter
        self.expert_kernels = None
        self.gate_kernels = None
        self.expert_kernel_initializer = initializers.get(
            expert_kernel_initializer)
        self.gate_kernel_initializer = initializers.get(
            gate_kernel_initializer)
        self.expert_kernel_regularizer = regularizers.get(
            expert_kernel_regularizer)
        self.gate_kernel_regularizer = regularizers.get(
            gate_kernel_regularizer)
        self.expert_kernel_constraint = constraints.get(
            expert_kernel_constraint)
        self.gate_kernel_constraint = constraints.get(gate_kernel_constraint)

        # Activation parameter
        self.expert_activation = activations.get(expert_activation)
        self.gate_activation = activations.get(gate_activation)

        # Bias parameter
        self.expert_bias = None
        self.gate_bias = None
        self.use_expert_bias = use_expert_bias
        self.use_gate_bias = use_gate_bias
        self.expert_bias_initializer = initializers.get(
            expert_bias_initializer)
        self.gate_bias_initializer = initializers.get(gate_bias_initializer)
        self.expert_bias_regularizer = regularizers.get(
            expert_bias_regularizer)
        self.gate_bias_regularizer = regularizers.get(gate_bias_regularizer)
        self.expert_bias_constraint = constraints.get(expert_bias_constraint)
        self.gate_bias_constraint = constraints.get(gate_bias_constraint)

        # Activity parameter
        self.activity_regularizer = regularizers.get(activity_regularizer)

        # Keras parameter
        self.input_spec = InputSpec(min_ndim=2)
        self.supports_masking = True

        super(MMoE, self).__init__(**kwargs)
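For context, a minimal sketch of the computation an MMoE layer's call typically performs with the kernels initialised above: every expert is a dense projection shared across tasks, and each task has its own softmax gate that mixes the expert outputs. Shapes and values below are illustrative, not taken from the class above.

# Sketch only (assumed example shapes); requires TensorFlow.
import tensorflow as tf

batch, input_dim, units, num_experts, num_tasks = 8, 16, 4, 3, 2
x = tf.random.normal((batch, input_dim))
expert_kernels = tf.random.normal((input_dim, units, num_experts))
gate_kernels = [tf.random.normal((input_dim, num_experts)) for _ in range(num_tasks)]

expert_outputs = tf.nn.relu(tf.einsum("bi,iue->bue", x, expert_kernels))    # (batch, units, num_experts)
task_outputs = []
for gk in gate_kernels:
    gate = tf.nn.softmax(tf.matmul(x, gk), axis=-1)                          # (batch, num_experts)
    task_outputs.append(tf.einsum("bue,be->bu", expert_outputs, gate))       # (batch, units) per task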
Beispiel #28
0
    def __init__(self,
                 units,
                 feature_units,
                 memory_size,
                 conv_units,
                 n_actions,
                 num_head=10,
                 dropout=0.0,
                 use_bias=True,
                 compression_rate=4,
                 gate_initializer='glorot_normal',
                 gate_regularizer='l2',
                 gate_constraint=None,
                 state_initializer='glorot_uniform',
                 state_constraint=None,
                 bias_initializer='zeros',
                 bias_regularizer='l2',
                 bias_constraint=None,
                 **kwargs):
        """OSAR - Object-Stimulated Active Repeater

        # Arguments
            units: int > 0. Number of classic units inside the layer.
            feature_units: int > 0. Number of output features in the layer.
            memory_size: int > 1. Size of the dictionary memory. Larger values can store more combinations but require more memory and computation.
            conv_units: int > 0. Number of convolution units in the layer.
            n_actions: int > 1. Number of actions in the output vector.
            num_head: int > 0. Number of attention `heads`.
            dropout: 0.0 <= float <= 1.0. Dropout rate inside attention weights.
            use_bias: Boolean. Whether to use bias (default - True).
            compression_rate: int. Compression rate of the transformer memories.
            gate_initializer: keras.initializer. Initializer for attention weights (default - glorot_normal).
            gate_regularizer: keras.regularizer. Regularizer for attention weights (default - l2).
            gate_constraint: keras.constraint. Constraint for attention weights (default - None).
            state_initializer: keras.initializer. Initializer for state matrices (default - glorot_uniform).
            state_constraint: keras.constraint. Constraint for state matrices (default - None).
            bias_initializer: keras.initializer. Initializer for biases (default - zeros).
            bias_regularizer: keras.regularizer. Regularizer for biases (default - l2).
            bias_constraint: keras.constraint. Constraint for attention weights (default - None).
        
        # Input Shape
            3D tensor with shape: `(batch_size, sequence_length, units)`.
        # Output Shape
            
        # References
            - None yet

        """
        # return_runtime is a flag for testing, which shows the real backend
        # implementation chosen by grappler in graph mode.
        self._return_runtime = kwargs.pop('return_runtime', False)

        super(OSAR, self).__init__(dynamic=True, **kwargs)

        self.units = units
        self.n_actions = n_actions
        self.conv_units = conv_units
        self.feature_units = feature_units
        self.num_head = num_head
        self.dropout = dropout
        self.use_bias = use_bias
        self.memory_size = memory_size
        self.compression_rate = compression_rate
        self.state_constraint = constraints.get(state_constraint)
        self.state_initializer = initializers.get(state_initializer)

        self.gate_initializer = initializers.get(gate_initializer)
        self.gate_regularizer = regularizers.get(gate_regularizer)
        self.gate_constraint = constraints.get(gate_constraint)

        self.bias_initializer = initializers.get(bias_initializer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.bias_constraint = constraints.get(bias_constraint)
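The OSAR internals are not shown here, so the following is only a sketch of one common way a compression_rate is applied to transformer-style memories (average-pooling groups of old memory slots), with assumed example shapes:

# Sketch only; illustrates compressing a memory by a factor of compression_rate.
import tensorflow as tf

memory_size, compression_rate, dim = 16, 4, 8
memory = tf.random.normal((1, memory_size, dim))
compressed = tf.nn.avg_pool1d(memory, ksize=compression_rate,
                              strides=compression_rate, padding="VALID")
# -> (1, memory_size // compression_rate, dim) == (1, 4, 8)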
Beispiel #29
0
 def get_constraint(self, name):
     if name not in self.constraints:
         self.constraints[name] = constraints.get(None)
     return self.constraints[name]
Beispiel #30
0
    def __init__(
            self,
            filters,
            kernel_size,
            feature_number,  # number of parallel input time series (features)
            strides=(1, 1),
            padding='valid',
            data_format=None,
            dilation_rate=(1, 1),
            activation='tanh',
            recurrent_activation='hard_sigmoid',
            conv_activation='hard_sigmoid',
            convolutional_type="early",
            use_bias=True,
            kernel_initializer='glorot_uniform',
            recurrent_initializer='orthogonal',
            bias_initializer='zeros',
            unit_forget_bias=True,
            kernel_regularizer=None,
            recurrent_regularizer=None,
            bias_regularizer=None,
            kernel_constraint=None,
            recurrent_constraint=None,
            bias_constraint=None,
            dropout=0.,
            recurrent_dropout=0.,
            **kwargs):
        """
        filters : A list, specifies the number of filters in each layer, e.g. [10, 10].
        kernel_size : A list of the same length as filters, window size of the 1D convolution, e.g. [3, 3].
        feature_number : int, number of parallel input time series, e.g. 28 sensors --> 28.
        recurrent_activation : A list of str, specifies the type of activation function per layer.
        """

        super(ConvLSTM1DCell, self).__init__(**kwargs)

        self.number_of_layer = len(filters)

        self.out_feature_number = feature_number
        self.convolutional_type = convolutional_type

        # =============   Each layer has different parameters    ======================
        self.filters = filters
        self.conv_layer_number = len(filters)

        self.kernel_size = []

        for index, size in enumerate(kernel_size):
            if self.convolutional_type[index] == "hybrid":
                self.kernel_size.append(
                    conv_utils.normalize_tuple((size, 1), 2, 'kernel_size'))
            if self.convolutional_type[index] == "early":
                self.kernel_size.append(
                    conv_utils.normalize_tuple((size, feature_number), 2,
                                               'kernel_size'))
                self.out_feature_number = 1
                feature_number = 1

        self.recurrent_activation = []
        for acti in recurrent_activation:
            self.recurrent_activation.append(activations.get(acti))

        self.conv_activation = []
        for acti in conv_activation:
            self.conv_activation.append(activations.get(acti))

        self.state_size = (self.filters[-1], self.filters[-1])

        # =============   Each layer has the same parameter   ======================
        self.strides = conv_utils.normalize_tuple(strides, 2,
                                                  'strides')  # (1,1)
        self.padding = conv_utils.normalize_padding(padding)  # valid
        self.data_format = conv_utils.normalize_data_format(
            data_format)  # None --- -1
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, 2, 'dilation_rate')
        self.activation = activations.get(activation)  # tanh default
        self.use_bias = use_bias  # True
        self.kernel_initializer = initializers.get(
            kernel_initializer)  # glorot_uniform
        self.recurrent_initializer = initializers.get(
            recurrent_initializer)  # orthogonal
        self.bias_initializer = initializers.get(bias_initializer)  # zeros
        self.unit_forget_bias = unit_forget_bias  # True

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
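The convolutional_type argument above distinguishes kernels that span every input feature ("early" fusion) from kernels that span a single feature ("hybrid"). A minimal sketch of the difference using stock Keras layers and assumed example values:

# Sketch only; requires TensorFlow.
import tensorflow as tf

feature_number, size = 28, 3
x = tf.random.normal((1, 100, feature_number, 1))                # (batch, time, features, channels)
early = tf.keras.layers.Conv2D(10, (size, feature_number))(x)    # kernel spans all features -> (1, 98, 1, 10)
hybrid = tf.keras.layers.Conv2D(10, (size, 1))(x)                # kernel spans one feature  -> (1, 98, 28, 10)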
Beispiel #31
0
    def __init__(self,
                 sub_units,
                 sub_lstms,
                 cake_activation='tanh',
                 cake_recurrent_activation='hard_sigmoid',
                 sub_activation='tanh',
                 sub_recurrent_activation='hard_sigmoid',
                 cake_use_bias=True,
                 sub_use_bias=True,
                 cake_kernel_initializer='glorot_uniform',
                 cake_recurrent_initializer='orthogonal',
                 cake_bias_initializer='zeros',
                 sub_kernel_initializer='glorot_uniform',
                 sub_recurrent_initializer='orthogonal',
                 sub_bias_initializer='zeros',
                 cake_unit_forget_bias=True,
                 sub_unit_forget_bias=True,
                 cake_kernel_regularizer=None,
                 cake_recurrent_regularizer=None,
                 cake_bias_regularizer=None,
                 sub_kernel_regularizer=None,
                 sub_recurrent_regularizer=None,
                 sub_bias_regularizer=None,
                 cake_kernel_constraint=None,
                 cake_recurrent_constraint=None,
                 cake_bias_constraint=None,
                 sub_kernel_constraint=None,
                 sub_recurrent_constraint=None,
                 sub_bias_constraint=None,
                 cake_dropout=0.,
                 cake_recurrent_dropout=0.,
                 sub_dropout=0.,
                 sub_recurrent_dropout=0.,
                 implementation=1,
                 **kwargs):
        super(JujubeCakeCell, self).__init__(**kwargs)
        self.sub_units = sub_units
        self.sub_lstms = sub_lstms
        self.units = self.sub_units * self.sub_lstms

        self.cake_activation = activations.get(cake_activation)
        self.cake_recurrent_activation = activations.get(
            cake_recurrent_activation)
        self.sub_activation = activations.get(sub_activation)
        self.sub_recurrent_activation = activations.get(
            sub_recurrent_activation)
        self.cake_use_bias = cake_use_bias
        self.sub_use_bias = sub_use_bias

        self.cake_kernel_initializer = initializers.get(
            cake_kernel_initializer)
        self.cake_recurrent_initializer = initializers.get(
            cake_recurrent_initializer)
        self.cake_bias_initializer = initializers.get(cake_bias_initializer)
        self.sub_kernel_initializer = initializers.get(sub_kernel_initializer)
        self.sub_recurrent_initializer = initializers.get(
            sub_recurrent_initializer)
        self.sub_bias_initializer = initializers.get(sub_bias_initializer)

        self.cake_unit_forget_bias = cake_unit_forget_bias
        self.sub_unit_forget_bias = sub_unit_forget_bias

        self.cake_kernel_regularizer = regularizers.get(
            cake_kernel_regularizer)
        self.cake_recurrent_regularizer = regularizers.get(
            cake_recurrent_regularizer)
        self.cake_bias_regularizer = regularizers.get(cake_bias_regularizer)
        self.sub_kernel_regularizer = regularizers.get(sub_kernel_regularizer)
        self.sub_recurrent_regularizer = regularizers.get(
            sub_recurrent_regularizer)
        self.sub_bias_regularizer = regularizers.get(sub_bias_regularizer)

        self.cake_kernel_constraint = constraints.get(cake_kernel_constraint)
        self.cake_recurrent_constraint = constraints.get(
            cake_recurrent_constraint)
        self.cake_bias_constraint = constraints.get(cake_bias_constraint)
        self.sub_kernel_constraint = constraints.get(sub_kernel_constraint)
        self.sub_recurrent_constraint = constraints.get(
            sub_recurrent_constraint)
        self.sub_bias_constraint = constraints.get(sub_bias_constraint)

        self.cake_dropout = min(1., max(0., cake_dropout))
        self.cake_recurrent_dropout = min(1., max(0., cake_recurrent_dropout))
        self.sub_dropout = min(1., max(0., sub_dropout))
        self.sub_recurrent_dropout = min(1., max(0., sub_recurrent_dropout))

        self.implementation = implementation

        self.state_size = [
            self.units, self.units, self.sub_units, self.sub_units
        ]
        self.sub_state_size = [self.sub_units, self.sub_units]

        self._dropout_mask = None
        self._recurrent_dropout_mask = None
        self._sub_dropout_mask = None
        self._sub_recurrent_dropout_mask = None
Beispiel #32
0
    def __init__(self,
                 units,
                 activation='relu',
                 use_bias=True,
                 add_self_loops=False,
                 aggregation_method='sum',
                 graph_regularization=None,
                 num_bases=None,
                 kernel_initializer='glorot_uniform',
                 kernel_coef_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 kernel_coef_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 kernel_coef_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(GraphConv, self).__init__(
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)

        if graph_regularization not in [None, 'basis']:
            raise ValueError('An invalid value of `graph_regularization` has' +
                             ' been passed to a `GraphConv` layer.')

        if graph_regularization is not None and num_bases is None:
            raise ValueError('The `num_bases` property must be set if ' +
                             '`graph_regularization` is not None.')

        if graph_regularization is None and num_bases is not None:
            raise ValueError('The `num_bases` property must not be set if ' +
                             '`graph_regularization` is None.')

        if num_bases is not None and num_bases <= 0:
            raise ValueError('The `num_bases` property must be a positive ' +
                             'integer.')

        self.units = int(units)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.add_self_loops = add_self_loops
        self.aggregation_method = aggregation_method
        self.graph_regularization = graph_regularization
        self.num_bases = num_bases
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_coef_initializer = initializers.get(
            kernel_coef_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_coef_regularizer = regularizers.get(
            kernel_coef_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.kernel_coef_constraint = constraints.get(kernel_coef_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.input_spec = {
            NODE_FEATURES: InputSpec(min_ndim=2),
            EDGE_FEATURES: InputSpec(ndim=4)
        }
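The graph_regularization='basis' / num_bases checks above guard a basis decomposition in the style of relational graph convolutions: rather than learning one full kernel per relation, each relation's kernel is a coefficient-weighted combination of a small set of shared basis kernels. A minimal sketch with assumed example shapes:

# Sketch only; requires TensorFlow.
import tensorflow as tf

num_relations, num_bases, in_dim, units = 5, 2, 16, 8
bases = tf.random.normal((num_bases, in_dim, units))
coefs = tf.random.normal((num_relations, num_bases))
# Each relation kernel is a weighted sum of the shared bases, so far fewer
# parameters are learned than with one independent kernel per relation.
relation_kernels = tf.einsum("rb,bio->rio", coefs, bases)   # (num_relations, in_dim, units)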
Beispiel #33
0
 def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1),
              groups=1, activation=None, use_bias=True, dtype=DEFAULT_COMPLEX_TYPE,
              kernel_initializer=ComplexGlorotUniform(), bias_initializer=Zeros(),
              kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
              kernel_constraint=None, bias_constraint=None, **kwargs):
     """
     :param filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution).
     :param kernel_size: An integer or tuple/list of 2 integers, specifying the height
         and width of the 2D convolution window. Can be a single integer to specify
         the same value for all spatial dimensions.
     :param strides: An integer or tuple/list of 2 integers, specifying the strides of
         the convolution along the height and width. Can be a single integer to
         specify the same value for all spatial dimensions. Specifying any stride
         value != 1 is incompatible with specifying any `dilation_rate` value != 1.
     :param padding: one of `"valid"` or `"same"` (case-insensitive).
         `"valid"` means no padding. `"same"` results in padding evenly to
         the left/right or up/down of the input such that output has the same
         height/width dimension as the input.
     :param data_format: A string, one of `channels_last` (default) or `channels_first`.
         The ordering of the dimensions in the inputs. `channels_last` corresponds
         to inputs with shape `(batch_size, height, width, channels)` while
         `channels_first` corresponds to inputs with shape `(batch_size, channels,
         height, width)`. It defaults to the `image_data_format` value found in
         your Keras config file at `~/.keras/keras.json`. If you never set it, then
         it will be `channels_last`.
     :param dilation_rate: an integer or tuple/list of 2 integers, specifying the
         dilation rate to use for dilated convolution. Can be a single integer to
         specify the same value for all spatial dimensions. Currently, specifying
         any `dilation_rate` value != 1 is incompatible with specifying any stride
         value != 1.
     :param groups: A positive integer specifying the number of groups in which the
         input is split along the channel axis. Each group is convolved separately
         with `filters / groups` filters. The output is the concatenation of all
         the `groups` results along the channel axis. Input channels and `filters`
         must both be divisible by `groups`.
     :param activation: Activation function to use. If you don't specify anything, no activation is applied.
         For complex :code:`dtype`, this must be a function from the :code:`cvnn.activations` module.
     :param use_bias: Boolean, whether the layer uses a bias vector.
     :param kernel_initializer: Initializer for the `kernel` weights matrix (see `keras.initializers`).
     :param bias_initializer: Initializer for the bias vector (see `keras.initializers`).
     :param kernel_regularizer: Regularizer function applied to the `kernel` weights matrix (see `keras.regularizers`).
     :param bias_regularizer: Regularizer function applied to the bias vector (see `keras.regularizers`).
     :param activity_regularizer: Regularizer function applied to the output of the layer (its "activation") (see `keras.regularizers`).
     :param kernel_constraint: Constraint function applied to the kernel matrix (see `keras.constraints`).
     :param bias_constraint: Constraint function applied to the bias vector (see `keras.constraints`).
     """
     super(ComplexConv2D, self).__init__(
         rank=2, dtype=dtype,
         filters=filters,
         kernel_size=kernel_size,
         strides=strides,
         padding=padding,
         data_format=data_format,
         dilation_rate=dilation_rate,
         groups=groups,
         activation=activations.get(activation),
         use_bias=use_bias,
         kernel_initializer=initializers.get(kernel_initializer),
         bias_initializer=initializers.get(bias_initializer),
         kernel_regularizer=regularizers.get(kernel_regularizer),
         bias_regularizer=regularizers.get(bias_regularizer),
         activity_regularizer=regularizers.get(activity_regularizer),
         kernel_constraint=constraints.get(kernel_constraint),
         bias_constraint=constraints.get(bias_constraint),
         **kwargs)
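A complex-valued convolution can be written as two real convolutions via (a + ib)(c + id) = (ac - bd) + i(ad + bc); the cvnn layer above handles this internally. A minimal sketch with plain TensorFlow ops and assumed example shapes:

# Sketch only; requires TensorFlow.
import tensorflow as tf

x_re, x_im = tf.random.normal((1, 8, 8, 3)), tf.random.normal((1, 8, 8, 3))
k_re, k_im = tf.random.normal((3, 3, 3, 4)), tf.random.normal((3, 3, 3, 4))

conv = lambda x, k: tf.nn.conv2d(x, k, strides=1, padding="SAME")
y_re = conv(x_re, k_re) - conv(x_im, k_im)   # real part
y_im = conv(x_re, k_im) + conv(x_im, k_re)   # imaginary part
y = tf.complex(y_re, y_im)                   # (1, 8, 8, 4)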
Beispiel #34
0
    def __init__(self,
                 rank,
                 kernel_size,
                 growth_rate,
                 depth,
                 output_filters=None,
                 use_bottleneck=True,
                 bottleneck_filters_multiplier=4,
                 use_batch_normalization=True,
                 data_format=None,
                 activation="relu",
                 use_bias=True,
                 kernel_initializer="he_normal",
                 bias_initializer="zeros",
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        if rank not in [1, 2, 3]:
            raise ValueError(
                "`rank` must be in [1, 2, 3]. Got {}".format(rank))

        super(DenseBlockND, self).__init__(**kwargs)

        self.rank = rank
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                      "kernel_size")
        self.output_filters = output_filters
        self.growth_rate = growth_rate

        if use_bottleneck:
            if (depth % 2) != 0:
                raise ValueError(
                    "Depth must be a multiple of 2 when using bottlenecks. Got {}."
                    .format(depth))

        self._depth = depth // 2 if use_bottleneck else depth
        self.use_bottleneck = use_bottleneck
        self.bottleneck_filters_multiplier = bottleneck_filters_multiplier

        self.use_batch_normalization = use_batch_normalization

        self.data_format = conv_utils.normalize_data_format(data_format)
        self.channel_axis = -1 if self.data_format == "channels_last" else 1

        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.composite_function_blocks: Optional[
            List[CompositeFunctionBlock]] = None
        self.transition_layer = None

        self.input_spec = InputSpec(ndim=self.rank + 2)
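Inside a dense block, each step emits growth_rate new channels that are concatenated onto the running feature map, so after depth steps the channel count grows by depth * growth_rate (which is also why self._depth above halves depth when bottlenecks are used). A minimal sketch with assumed example values:

# Sketch only; requires TensorFlow.
import tensorflow as tf

growth_rate, depth = 4, 3
x = tf.random.normal((1, 32, 32, 8))
for _ in range(depth):
    new_features = tf.keras.layers.Conv2D(growth_rate, 3, padding="same")(x)
    x = tf.concat([x, new_features], axis=-1)   # channels: 8 -> 12 -> 16 -> 20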
Beispiel #35
0
    def __init__(self,
                 filters,
                 kernel_size,
                 cov_kernel_size=(3, 1),
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 dilation_rate=(1, 1),
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 **kwargs):
        super(ConvLSTM2DCell_2, self).__init__(**kwargs)
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2,
                                                      'kernel_size')
        #############################
        self.cov_kernel_size = cov_kernel_size
        self.kernel_size_1 = conv_utils.normalize_tuple(
            cov_kernel_size, 2, 'kernel_size')
        ##################################
        self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, 2, 'dilation_rate')
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        ###############################
        self.cov_kernel_initializer = initializers.get(kernel_initializer)
        ##########################
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.state_size = (self.filters, self.filters)
Beispiel #36
0
 def __init__(self,
                 filters,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 dilation_rate=(1, 1),
                 depth_multiplier=1,
                 activation=None,
                 use_bias=True,
                 depthwise_initializer='glorot_uniform',
                 pointwise_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 depthwise_regularizer=None,
                 pointwise_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 depthwise_constraint=None,
                 pointwise_constraint=None,
                 bias_constraint=None,
                 activations_datatype_size_byte=1,
                 weights_datatype_size_byte=1,
                 results_datatype_size_byte=4,
                 systolic_array_height=256,
                 systolic_array_width=256,
                 activation_fifo_depth=8,
                 accumulator_array_height=4096,
                 log_file_output_dir='.',
                 model_name='unnamed',
                 **kwargs):
   
     super(mpusim_separable_conv2d, self).__init__(
                                                 rank=2,
                                                 filters=filters,
                                                 kernel_size=kernel_size,
                                                 strides=strides,
                                                 padding=padding,
                                                 data_format=data_format,
                                                 dilation_rate=dilation_rate,
                                                 depth_multiplier=depth_multiplier,
                                                 activation=activations.get(activation),
                                                 use_bias=use_bias,
                                                 depthwise_initializer=initializers.get(depthwise_initializer),
                                                 pointwise_initializer=initializers.get(pointwise_initializer),
                                                 bias_initializer=initializers.get(bias_initializer),
                                                 depthwise_regularizer=regularizers.get(depthwise_regularizer),
                                                 pointwise_regularizer=regularizers.get(pointwise_regularizer),
                                                 bias_regularizer=regularizers.get(bias_regularizer),
                                                 activity_regularizer=regularizers.get(activity_regularizer),
                                                 depthwise_constraint=constraints.get(depthwise_constraint),
                                                 pointwise_constraint=constraints.get(pointwise_constraint),
                                                 bias_constraint=constraints.get(bias_constraint),
                                                 **kwargs)
     
     self.activations_datatype_size_byte = activations_datatype_size_byte
     self.weights_datatype_size_byte = weights_datatype_size_byte
     self.results_datatype_size_byte = results_datatype_size_byte
     self.systolic_array_height = systolic_array_height
     self.systolic_array_width = systolic_array_width
     self.activation_fifo_depth = activation_fifo_depth
     self.accumulator_array_height = accumulator_array_height
     self.log_file_output_dir = log_file_output_dir
     self.model_name = model_name
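The simulated layer above wraps a standard separable convolution, which factors a full convolution into a per-channel depthwise convolution followed by a 1x1 pointwise convolution. A minimal sketch with stock Keras layers and assumed example shapes:

# Sketch only; requires TensorFlow.
import tensorflow as tf

x = tf.random.normal((1, 32, 32, 8))
depthwise = tf.keras.layers.DepthwiseConv2D(3, padding="same")(x)   # one 3x3 filter per input channel
pointwise = tf.keras.layers.Conv2D(16, 1)(depthwise)                # 1x1 mixing across channels -> (1, 32, 32, 16)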
Beispiel #37
0
    def __init__(self,
                 units_vec,
                 modules=None,
                 tau_vec=1.,
                 connectivity='dense',
                 activation='tanh',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 **kwargs):
        super(CTRNNCell, self).__init__(**kwargs)
        self.connectivity = connectivity

        if isinstance(units_vec, list):
            self.units_vec = units_vec[:]
            self.modules = len(units_vec)
            self.units = 0
            for k in range(self.modules):
                self.units += units_vec[k]
        else:
            self.units = units_vec
            if modules is not None and modules > 1:
                self.modules = int(modules)
                self.units_vec = [
                    units_vec // modules for k in range(self.modules)
                ]
            else:
                self.modules = 1
                self.units_vec = [units_vec]
                self.connectivity = 'dense'

        # smallest timescale should be 1.0
        if isinstance(tau_vec, list):
            if len(tau_vec) != self.modules:
                raise ValueError("vector of tau must be of same size as "
                                 "num_modules or size of vector of num_units")
            self.tau_vec = tau_vec[:]
            self.taus = array_ops.constant([[max(1., float(tau_vec[k]))]
                                            for k in range(self.modules)
                                            for n in range(self.units_vec[k])],
                                           dtype=self.dtype,
                                           shape=[self.units],
                                           name="taus")
        else:
            if self.modules > 1:
                self.tau_vec = [
                    max(1., float(tau_vec)) for k in range(self.modules)
                ]
            else:
                self.tau_vec = [max(1., float(tau_vec))]
            self.taus = array_ops.constant(max(1., tau_vec),
                                           dtype=self.dtype,
                                           shape=[self.units],
                                           name="taus")

        self.activation = activations.get(activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.state_size = (self.units, self.units)
        self.output_size = (self.units, self.units)
        self._dropout_mask = None
        self._recurrent_dropout_mask = None
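The per-unit taus built above act as leaky-integration time constants in the continuous-time RNN update: units with a large tau change their internal state slowly. The cell's call is not shown here, so the update below is only a sketch of the standard CTRNN step with assumed example values:

# Sketch only; requires TensorFlow.
import tensorflow as tf

taus = tf.constant([1., 1., 2., 2., 4., 4.])   # per-unit time constants
drive = tf.random.normal((1, 6))               # W x_t + R h_{t-1} + b (precomputed drive)
u_prev = tf.zeros((1, 6))                      # previous internal state
u_new = (1. - 1. / taus) * u_prev + (1. / taus) * drive
h_new = tf.tanh(u_new)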
Beispiel #38
0
  def __init__(self,
               units,
               activation='tanh',
               init_kern_1='RandomUniform',
               init_kern_2='RandomUniform',
               init_kern_3='RandomUniform',
               init_recur_kern_2='orthogonal',
               init_out_fb='orthogonal',
               init_bias='zeros',
               connectivity=None,
               reg_kern_1=None,
               reg_kern_3=None,
               reg_out_fb=None,
               reg_bias=None,
               constraint_kern_1=None,
               constraint_kern_3=None,
               constraint_recur_kern_2=None,
               constraint_out_fb=None,
               constraint_bias=None,
               in_dropout=None,
               recur_dropout=None,
               train_kern_1=False,
               train_kern_3=False,
               train_out_fb=False,
               use_out_fb=False,
               use_dropout_mask=False,
               use_recur=False,
               use_clock=False,
               clock_rate=None,
               data_format=None,
               **kwargs):
    super(SimpleRNNCell, self).__init__(**kwargs)
    self.units = units

    self.activation = activations.get(activation)

    self.init_kern_1 = initializers.get(init_kern_1)
    self.init_kern_2 = initializers.get(init_kern_2)
    self.init_kern_3 = initializers.get(init_kern_3)
    self.init_recur_kern_2 = initializers.get(init_recur_kern_2)
    self.init_out_fb = initializers.get(init_out_fb)
    self.init_bias = initializers.get(init_bias)

    if connectivity is None or len(connectivity) != 3:
        self.connectivity_1 = 1.
        self.connectivity_2 = 1.
        self.connectivity_3 = 1.
    else:
        self.connectivity_1 = min(1., max(0., connectivity[0]))
        self.connectivity_2 = min(1., max(0., connectivity[1]))
        self.connectivity_3 = min(1., max(0., connectivity[2]))
    
    self.reg_kern_1 = regularizers.get(reg_kern_1)
    self.reg_kern_3 = regularizers.get(reg_kern_3)
    self.reg_out_fb = regularizers.get(reg_out_fb)
    self.reg_bias = regularizers.get(reg_bias)
    
    self.constraint_kern_1 = constraints.get(constraint_kern_1)
    self.constraint_kern_3 = constraints.get(constraint_kern_3) 
    self.constraint_recur_kern_2 = constraints.get(constraint_recur_kern_2)
    self.constraint_out_fb = constraints.get(constraint_out_fb)
    self.constraint_bias = constraints.get(constraint_bias)
                                       
    self.in_dropout = min(1., max(0., in_dropout or 0.))
    self.recur_dropout = min(1., max(0., recur_dropout or 0.))
    
    self.train_kern_1 = train_kern_1
    self.train_kern_3 = train_kern_3

    self.clock = use_clock
    self.clock_rate = clock_rate

    self.in_dropout_mask = None
    self.recur_dropout_mask = None
    self.state_size = self.units
    self.output_size = self.units
    self.tf_data_format = data_format
    self.clock_kernel = None
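The clipped connectivity fractions above are typically used as connection probabilities: each kernel is multiplied once by a fixed binary mask so that only that fraction of its weights is non-zero, as in echo-state-style reservoirs. A minimal sketch with assumed example values:

# Sketch only; requires TensorFlow.
import tensorflow as tf

units, connectivity = 10, 0.3
mask = tf.cast(tf.random.uniform((units, units)) < connectivity, tf.float32)
recurrent_kernel = tf.random.normal((units, units)) * mask   # roughly 30% of weights non-zero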