Example 1
  def __init__(self,
               axis=-1,
               momentum=0.99,
               epsilon=1e-3,
               center=True,
               scale=True,
               beta_initializer='zeros',
               gamma_initializer='ones',
               moving_mean_initializer='zeros',
               moving_variance_initializer='ones',
               beta_regularizer=None,
               gamma_regularizer=None,
               beta_constraint=None,
               gamma_constraint=None,
               renorm=False,
               renorm_clipping=None,
               renorm_momentum=0.99,
               fused=None,
               trainable=True,
               virtual_batch_size=None,
               adjustment=None,
               name=None,
               **kwargs):
    super(BatchNormalization, self).__init__(
        name=name, trainable=trainable, **kwargs)
    if isinstance(axis, list):
      self.axis = axis[:]
    else:
      self.axis = axis
    self.momentum = momentum
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.moving_mean_initializer = initializers.get(moving_mean_initializer)
    self.moving_variance_initializer = initializers.get(
        moving_variance_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)
    self.renorm = renorm
    self.virtual_batch_size = virtual_batch_size
    self.adjustment = adjustment
    if fused is None:
      fused = True
    self.supports_masking = True

    self.fused = fused
    self._bessels_correction_test_only = True

    if renorm:
      renorm_clipping = renorm_clipping or {}
      keys = ['rmax', 'rmin', 'dmax']
      if set(renorm_clipping) - set(keys):
        raise ValueError('renorm_clipping %s contains keys not in %s' %
                         (renorm_clipping, keys))
      self.renorm_clipping = renorm_clipping
      self.renorm_momentum = renorm_momentum
Example 2
  def __init__(self,
               units,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
      kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    super(Dense, self).__init__(
        activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
    self.units = int(units)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.supports_masking = True
    self.input_spec = InputSpec(min_ndim=2)
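A note on the getter pattern above: it is what lets Dense accept either a string identifier or an initializer instance for the same argument. A minimal usage sketch, assuming TensorFlow 2.x and the public tf.keras API:

import tensorflow as tf

# Both forms are resolved to the same initializer class by initializers.get().
layer_a = tf.keras.layers.Dense(64, kernel_initializer='glorot_uniform')
layer_b = tf.keras.layers.Dense(64, kernel_initializer=tf.keras.initializers.GlorotUniform(seed=0))

print(type(layer_a.kernel_initializer).__name__)  # GlorotUniform
print(type(layer_b.kernel_initializer).__name__)  # GlorotUniform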
Example 3
 def __init__(self,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
   super(LocallyConnected1D, self).__init__(**kwargs)
   self.filters = filters
   self.kernel_size = conv_utils.normalize_tuple(kernel_size, 1, 'kernel_size')
   self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
   self.padding = conv_utils.normalize_padding(padding)
   if self.padding != 'valid':
     raise ValueError('Invalid border mode for LocallyConnected1D '
                      '(only "valid" is supported): ' + padding)
   self.data_format = conv_utils.normalize_data_format(data_format)
   self.activation = activations.get(activation)
   self.use_bias = use_bias
   self.kernel_initializer = initializers.get(kernel_initializer)
   self.bias_initializer = initializers.get(bias_initializer)
   self.kernel_regularizer = regularizers.get(kernel_regularizer)
   self.bias_regularizer = regularizers.get(bias_regularizer)
   self.activity_regularizer = regularizers.get(activity_regularizer)
   self.kernel_constraint = constraints.get(kernel_constraint)
   self.bias_constraint = constraints.get(bias_constraint)
   self.input_spec = InputSpec(ndim=3)
Example 4
  def __init__(self,
               axis=-1,
               epsilon=1e-3,
               center=True,
               scale=True,
               beta_initializer='zeros',
               gamma_initializer='ones',
               beta_regularizer=None,
               gamma_regularizer=None,
               beta_constraint=None,
               gamma_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    super(LayerNormalization, self).__init__(
        name=name, trainable=trainable, **kwargs)
    if isinstance(axis, (list, tuple)):
      self.axis = axis[:]
    elif isinstance(axis, int):
      self.axis = axis
    else:
      raise ValueError('Expected an int or a list/tuple of ints for the '
                       'argument \'axis\', but received instead: %s' % axis)

    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)

    self.supports_masking = True
Example 5
  def __init__(self,
               units,
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               time_major=False,
               **kwargs):
    super(RNN, self).__init__(**kwargs)  # pylint: disable=bad-super-call
    self.units = units
    cell_spec = collections.namedtuple('cell', ['state_size', 'output_size'])
    self.cell = cell_spec(
        state_size=(self.units, self.units), output_size=self.units)
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.unit_forget_bias = unit_forget_bias

    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)

    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.return_sequences = return_sequences
    self.return_state = return_state
    self.go_backwards = go_backwards
    self.stateful = stateful
    self.time_major = time_major
    self._num_constants = None
    self._num_inputs = None
    self._states = None
    self.input_spec = [InputSpec(ndim=3)]
    self.state_spec = [
        InputSpec(shape=(None, dim)) for dim in (self.units, self.units)
    ]
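Example 6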
  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format=None,
               dilation_rate=(1, 1),
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               **kwargs):
    super(ConvLSTM2DCell, self).__init__(**kwargs)
    self.filters = filters
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,
                                                    'dilation_rate')
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.use_bias = use_bias

    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.unit_forget_bias = unit_forget_bias

    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)

    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.dropout = min(1., max(0., dropout))
    self.recurrent_dropout = min(1., max(0., recurrent_dropout))
    self.state_size = (self.filters, self.filters)
    self._dropout_mask = None
    self._recurrent_dropout_mask = None
Example 7
 def add_slot(self, var, slot_name, initializer="zeros"):
     """Add a new slot variable for `var`."""
     if slot_name not in self._slot_names:
         self._slot_names.append(slot_name)
     var_key = _var_key(var)
     slot_dict = self._slots.setdefault(var_key, {})
     weight = slot_dict.get(slot_name, None)
     if weight is None:
         if isinstance(initializer,
                       six.string_types) or callable(initializer):
             initializer = initializers.get(initializer)
             weight = slot_creator.create_slot_with_initializer(
                 var,
                 initializer,
                 shape=var.shape,
                 dtype=var.dtype,
                 name=slot_name)
         else:
             weight = slot_creator.create_slot(var, initializer, slot_name)
         backend.track_variable(weight)
         slot_dict[slot_name] = weight
         self._restore_slot_variable(slot_name=slot_name,
                                     variable=var,
                                     slot_variable=weight)
         self._weights.append(weight)
     return weight
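For context, add_slot is normally called from an optimizer's _create_slots hook. Below is a minimal sketch of a custom optimizer doing so, assuming the OptimizerV2 base class (exposed as tf.keras.optimizers.Optimizer up to TF 2.10, tf.keras.optimizers.legacy.Optimizer afterwards); the class name, slot name, and update rule are illustrative only:

import tensorflow as tf

class SketchMomentum(tf.keras.optimizers.Optimizer):
    """Illustrative optimizer that only demonstrates slot creation."""

    def __init__(self, learning_rate=0.01, name='SketchMomentum', **kwargs):
        super(SketchMomentum, self).__init__(name, **kwargs)
        self._set_hyper('learning_rate', learning_rate)

    def _create_slots(self, var_list):
        # One 'momentum' slot per variable; the string 'zeros' is resolved to an
        # initializer object exactly as in the add_slot implementation above.
        for var in var_list:
            self.add_slot(var, 'momentum', initializer='zeros')

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr = self._get_hyper('learning_rate', var.dtype.base_dtype)
        m = self.get_slot(var, 'momentum')
        m.assign(0.9 * m - lr * grad)
        return var.assign_add(m)

    def get_config(self):
        config = super(SketchMomentum, self).get_config()
        config.update(
            {'learning_rate': self._serialize_hyperparameter('learning_rate')})
        return config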
Example 8
    def __init__(self,
                 input_dim,
                 output_dim,
                 embeddings_initializer='uniform',
                 embeddings_regularizer=None,
                 activity_regularizer=None,
                 embeddings_constraint=None,
                 mask_zero=False,
                 input_length=None,
                 **kwargs):
        if 'input_shape' not in kwargs:
            if input_length:
                kwargs['input_shape'] = (input_length, )
            else:
                kwargs['input_shape'] = (None, )
        dtype = kwargs.pop('dtype', K.floatx())
        super(Embedding, self).__init__(dtype=dtype, **kwargs)

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.embeddings_initializer = initializers.get(embeddings_initializer)
        self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.embeddings_constraint = constraints.get(embeddings_constraint)
        self.mask_zero = mask_zero
        self.supports_masking = mask_zero
        self.input_length = input_length
        self._can_use_graph_functions = True
Example 9
    def build(self, input_shape):
        """Build `Layer`"""
        input_shape = tensor_shape.TensorShape(input_shape).as_list()
        self.input_spec = InputSpec(shape=input_shape)

        if not self.layer.built:
            self.layer.build(input_shape)
            self.layer.built = False

            if not hasattr(self.layer, 'kernel'):
                raise ValueError('`WeightNorm` must wrap a layer that'
                                 ' contains a `kernel` for weights')

            # The kernel's filter or unit dimension is -1
            self.layer_depth = int(self.layer.kernel.shape[-1])
            self.norm_axes = list(range(self.layer.kernel.shape.ndims - 1))

            self.layer.v = self.layer.kernel
            self.layer.g = self.layer.add_variable(
                name="g",
                shape=(self.layer_depth, ),
                initializer=initializers.get('ones'),
                dtype=self.layer.kernel.dtype,
                trainable=True)

            with ops.control_dependencies(
                [self.layer.g.assign(self._init_norm(self.layer.v))]):
                self._compute_weights()

            self.layer.built = True

        super(WeightNorm, self).build()
        self.built = True
Example 10
 def __init__(self, shape, kernel_initializer='glorot_uniform'):
     super(InputLayer, self).__init__()
     self.kernel_initializer = initializers.get(kernel_initializer)
      # Use the configured initializer for the embedding weights.
      self.init_embeds = self.add_weight('embedding',
                                         shape=shape,
                                         dtype='float32',
                                         initializer=self.kernel_initializer,
                                         trainable=True)
Example 11
  def _add_weight(self,
                  name,
                  shape=(),
                  dtype=None,
                  initializer='zeros'):
    """Adds a weight to this loss scale.

    Args:
      name: Variable name.
      shape: Variable shape.
      dtype: The type of the variable.
      initializer: The initializer to use.

    Returns:
      A variable.
    """
    if isinstance(initializer, six.string_types) or callable(initializer):
      initializer = initializers.get(initializer)
    variable = self._add_variable_with_custom_getter(
        name=name,
        shape=shape,
        getter=base_layer_utils.make_variable,
        overwrite=True,
        initializer=initializer,
        dtype=dtype,
        trainable=False,
        use_resource=True,
        synchronization=variables.VariableSynchronization.AUTO,
        # Set aggregation to NONE, as loss scaling variables should never be
        # aggregated.
        aggregation=variables.VariableAggregation.NONE)
    backend.track_variable(variable)
    return variable
Example 12
 def __init__(self,
              alpha_initializer="zeros",
              data_format="channels_last",
              **kwargs):
     super(ScaleBlock, self).__init__(**kwargs)
     self.data_format = data_format
     self.alpha_initializer = initializers.get(alpha_initializer)
Example 13
 def add_slot(self, var, slot_name, initializer="zeros"):
   """Add a new slot variable for `var`."""
   if slot_name not in self._slot_names:
     self._slot_names.append(slot_name)
   var_key = _var_key(var)
   slot_dict = self._slots.setdefault(var_key, {})
   weight = slot_dict.get(slot_name, None)
   if weight is None:
     if isinstance(initializer, six.string_types) or callable(initializer):
       initializer = initializers.get(initializer)
       initial_value = functools.partial(
           initializer, shape=var.shape, dtype=var.dtype)
     else:
       initial_value = initializer
     weight = tf_variables.Variable(
         name="%s/%s" % (var._shared_name, slot_name),  # pylint: disable=protected-access
         dtype=var.dtype,
         trainable=False,
         initial_value=initial_value)
     backend.track_variable(weight)
     slot_dict[slot_name] = weight
     self._restore_slot_variable(
         slot_name=slot_name, variable=var,
         slot_variable=weight)
     self._weights.append(weight)
   return weight
Example 14
    def build(self, input_shape):
        input_dim = input_shape[-1]
        self.w = self.add_weight(
            shape=(input_dim, self.units * 4),
            name='kernel',
            initializer=initializers.get('glorot_uniform'))

        self.u = self.add_weight(shape=(self.units, self.units * 4),
                                 name='recurrent_kernel',
                                 initializer=initializers.get('orthogonal'))
        self.bias = self.add_weight(shape=(self.units * 4,),
                                    name='bias',
                                    initializer=initializers.get('zeros'))

        self.recurrent_activation = activations.get('hard_sigmoid')
        self.activation = activations.get('tanh')
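The weights built above are four units wide because they pack the input, forget, cell, and output gate parameters. A sketch of how a matching step function could use them, assuming the i, f, c, o gate ordering that Keras uses (the function and its arguments are illustrative, not part of the original layer):

import tensorflow as tf

def lstm_step(cell, inputs, h_prev, c_prev):
    # Project inputs and the previous hidden state with the packed weights,
    # then split the result into the four gate pre-activations.
    z = tf.matmul(inputs, cell.w) + tf.matmul(h_prev, cell.u) + cell.bias
    z_i, z_f, z_c, z_o = tf.split(z, num_or_size_splits=4, axis=-1)
    i = cell.recurrent_activation(z_i)
    f = cell.recurrent_activation(z_f)
    c = f * c_prev + i * cell.activation(z_c)
    o = cell.recurrent_activation(z_o)
    h = o * cell.activation(c)
    return h, c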
Example 15
    def __init__(self,
                 input_dim,
                 output_dim,
                 embeddings_initializer='uniform',
                 embeddings_regularizer=None,
                 activity_regularizer=None,
                 embeddings_constraint=None,
                 mask_zero=False,
                 input_length=None,
                 **kwargs):
        if 'input_shape' not in kwargs:
            if input_length:
                kwargs['input_shape'] = (input_length, )
            else:
                kwargs['input_shape'] = (None, )
        dtype = kwargs.pop('dtype', K.floatx())
        # We set autocast to False, as we do not want to cast floating-point inputs
        # to self.dtype. In call(), we cast to int32, and casting to self.dtype
        # before casting to int32 might cause the int32 values to be different due
        # to a loss of precision.
        kwargs['autocast'] = False
        super(Embedding, self).__init__(dtype=dtype, **kwargs)

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.embeddings_initializer = initializers.get(embeddings_initializer)
        self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.embeddings_constraint = constraints.get(embeddings_constraint)
        self.mask_zero = mask_zero
        self.supports_masking = mask_zero
        self.input_length = input_length
        self._supports_ragged_inputs = True
Example 16
 def add_slot(self, var, slot_name, initializer="zeros"):
     """Add a new slot variable for `var`."""
     if slot_name not in self._slot_names:
         self._slot_names.append(slot_name)
     var_key = _var_key(var)
     slot_dict = self._slots.setdefault(var_key, {})
     weight = slot_dict.get(slot_name, None)
     if weight is None:
         if isinstance(initializer,
                       six.string_types) or callable(initializer):
             initializer = initializers.get(initializer)
             initial_value = functools.partial(initializer,
                                               shape=var.shape,
                                               dtype=var.dtype)
         else:
             initial_value = initializer
         strategy = distribute_ctx.get_strategy()
         with strategy.extended.colocate_vars_with(var):
             weight = tf_variables.Variable(
                 name="%s/%s" % (var._shared_name, slot_name),  # pylint: disable=protected-access
                 dtype=var.dtype,
                 trainable=False,
                 initial_value=initial_value)
         backend.track_variable(weight)
         slot_dict[slot_name] = weight
         self._restore_slot_variable(slot_name=slot_name,
                                     variable=var,
                                     slot_variable=weight)
         self._weights.append(weight)
     return weight
Example 17
    def __init__(self,
                 step_dim,
                 ll,
                 get_alpha=False,
                 get_sequence=False,
                 W_regularizer=None,
                 b_regularizer=None,
                 L_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 L_constraint=None,
                 bias=False,
                 **kwargs):
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.l_init = initializers.constant(value=0.5)
        self.ll = ll
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.L_regularizer = regularizers.get(L_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.L_constraint = constraints.get(L_constraint)
        self.get_sequence = get_sequence
        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0
        self.get_alpha = get_alpha
        super(Test_IAN, self).__init__(**kwargs)
Example 18
  def __init__(self,
               input_dim,
               output_dim,
               embeddings_initializer='uniform',
               embeddings_regularizer=None,
               activity_regularizer=None,
               embeddings_constraint=None,
               mask_zero=False,
               input_length=None,
               **kwargs):
    if 'input_shape' not in kwargs:
      if input_length:
        kwargs['input_shape'] = (input_length,)
      else:
        kwargs['input_shape'] = (None,)
    dtype = kwargs.pop('dtype', K.floatx())
    super(Embedding, self).__init__(dtype=dtype, **kwargs)

    self.input_dim = input_dim
    self.output_dim = output_dim
    self.embeddings_initializer = initializers.get(embeddings_initializer)
    self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.embeddings_constraint = constraints.get(embeddings_constraint)
    self.mask_zero = mask_zero
    self.supports_masking = mask_zero
    self.input_length = input_length
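A short usage sketch of the layer through the public tf.keras API (standard behavior; with mask_zero=True, index 0 is treated as padding):

import tensorflow as tf

emb = tf.keras.layers.Embedding(input_dim=1000, output_dim=16, mask_zero=True)
ids = tf.constant([[3, 7, 0, 0]])
vectors = emb(ids)            # shape (1, 4, 16)
mask = emb.compute_mask(ids)  # [[True, True, False, False]]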
Example 19
  def _add_weight(self,
                  name,
                  shape=(),
                  dtype=None,
                  initializer='zeros'):
    """Adds a weight to this loss scale.

    Args:
      name: Variable name.
      shape: Variable shape.
      dtype: The type of the variable.
      initializer: The initializer to use.

    Returns:
      A variable.
    """
    if isinstance(initializer, six.string_types) or callable(initializer):
      initializer = initializers.get(initializer)
    variable = self._add_variable_with_custom_getter(
        name=name,
        shape=shape,
        getter=base_layer_utils.make_variable,
        overwrite=True,
        initializer=initializer,
        dtype=dtype,
        trainable=False,
        use_resource=True,
        synchronization=variables.VariableSynchronization.AUTO,
        # Set aggregation to NONE, as loss scaling variables should never be
        # aggregated.
        aggregation=variables.VariableAggregation.NONE)
    backend.track_variable(variable)
    return variable
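Example 20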
 def __init__(self,
              filters,
              kernel_size,
              strides=(1, 1),
              padding='valid',
              output_padding=None,
              data_format=None,
              dilation_rate=(1, 1),
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              tied_to=None,
              **kwargs):
     super(Conv2DTranspose, self).__init__(
         filters=filters,
         kernel_size=kernel_size,
         strides=strides,
         padding=padding,
         data_format=data_format,
         dilation_rate=dilation_rate,
         activation=activations.get(activation),
         use_bias=use_bias,
         kernel_initializer=initializers.get(kernel_initializer),
         bias_initializer=initializers.get(bias_initializer),
         kernel_regularizer=regularizers.get(kernel_regularizer),
         bias_regularizer=regularizers.get(bias_regularizer),
         activity_regularizer=regularizers.get(activity_regularizer),
         kernel_constraint=constraints.get(kernel_constraint),
         bias_constraint=constraints.get(bias_constraint),
         **kwargs)
     self.tied_to = tied_to
     self.output_padding = output_padding
     if self.output_padding is not None:
         self.output_padding = conv_utils.normalize_tuple(
             self.output_padding, 2, 'output_padding')
         for stride, out_pad in zip(self.strides, self.output_padding):
             if out_pad >= stride:
                 raise ValueError('Stride ' + str(self.strides) +
                                  ' must be '
                                  'greater than output padding ' +
                                  str(self.output_padding))
Example 21
    def __init__(self,
                 filters,
                 kernel_size,
                 strides=1,
                 padding="valid",
                 output_padding=None,
                 data_format=None,
                 dilation_rate=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer="glorot_uniform",
                 bias_initializer="zeros",
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        if output_padding is not None:
            output_padding = (1, output_padding)
        else:
            output_padding = extract_singleton(output_padding)

        kernel_size = extract_singleton(kernel_size)
        strides = extract_singleton(strides)
        dilation_rate = extract_singleton(dilation_rate)

        super(Conv1DTranspose, self).__init__(
            filters=filters,
            kernel_size=(1, kernel_size),
            strides=(1, strides),
            padding=padding,
            output_padding=output_padding,
            data_format=data_format,
            dilation_rate=(1, dilation_rate),
            activation=activations.get(activation),
            use_bias=use_bias,
            kernel_initializer=initializers.get(kernel_initializer),
            bias_initializer=initializers.get(bias_initializer),
            kernel_regularizer=regularizers.get(kernel_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            kernel_constraint=constraints.get(kernel_constraint),
            bias_constraint=constraints.get(bias_constraint),
            **kwargs)

        self._input_dim = None
Example 22
 def __init__(self,
              rank,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              dilation_rate=1,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              use_spectral_norm=False,
              is_training=False,
              trainable=True,
              name=None,
              **kwargs):
     super(MyKerasConv, self).__init__(
         trainable=trainable,
         name=name,
         activity_regularizer=regularizers.get(activity_regularizer),
         **kwargs)
     self.rank = rank
     self.filters = filters
     self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                   'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.dilation_rate = conv_utils.normalize_tuple(
         dilation_rate, rank, 'dilation_rate')
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.use_spectral_norm = use_spectral_norm
     self.is_training = is_training
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(ndim=self.rank + 2)
Example 23
 def __init__(self,
              rank,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              dilation_rate=1,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              trainable=True,
              name=None,
              **kwargs):
     super(ConvAux, self).__init__(
         trainable=trainable,
         name=name,
         activity_regularizer=regularizers.get(activity_regularizer),
         **kwargs)
     self.rank = rank
     if filters is not None and not isinstance(filters, int):
         filters = int(filters)
     self.filters = filters
     self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                   'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
      if self.padding == 'causal':
          raise ValueError('Causal padding is only supported for '
                           '`Conv1D` and `SeparableConv1D`.')
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.dilation_rate = conv_utils.normalize_tuple(
         dilation_rate, rank, 'dilation_rate')
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(ndim=self.rank + 2)
Example 24
 def __init__(
         self,
         filters,
         kernel_size,
         strides=1,
         padding='valid',
         output_padding=None,
         data_format=None,
         activation=None,
         use_bias=True,
         kernel_initializer='glorot_uniform',
         bias_initializer='zeros',
         kernel_regularizer=None,
         bias_regularizer=None,
         activity_regularizer=None,
         kernel_constraint=None,
         bias_constraint=None,
         **kwargs,
     ):
     super().__init__(
         filters=filters,
         kernel_size=kernel_size,
         strides=strides,
         padding=padding,
         data_format=data_format,
         activation=activations.get(activation),
         use_bias=use_bias,
         kernel_initializer=initializers.get(kernel_initializer),
         bias_initializer=initializers.get(bias_initializer),
         kernel_regularizer=regularizers.get(kernel_regularizer),
         bias_regularizer=regularizers.get(bias_regularizer),
         activity_regularizer=regularizers.get(activity_regularizer),
         kernel_constraint=constraints.get(kernel_constraint),
         bias_constraint=constraints.get(bias_constraint),
         **kwargs,
     )
     self.output_padding = output_padding
     if self.output_padding is not None:
         self.output_padding = conv_utils.normalize_tuple(
             self.output_padding, 1, 'output_padding',
         )
         for stride, out_pad in zip(self.strides, self.output_padding):
             if out_pad >= stride:
                 raise ValueError(
                     f'Stride {self.strides} must be greater than '
                     f'output padding {self.output_padding}',
                 )
Example 25
    def __init__(
        self,
        units,
        activation            = complex_activations.complex_amp_phase_exp,
        use_bias              = True,
        kernel_initializer    = complex_initializers.ComplexGlorotNormal,
        recurrent_initializer = complex_initializers.ComplexGlorotNormal,
        bias_initializer      = 'zeros',
        kernel_regularizer    = None,
        recurrent_regularizer = None,
        bias_regularizer      = None,
        activity_regularizer  = None,
        kernel_constraint     = None,
        recurrent_constraint  = None,
        bias_constraint       = None,
        dropout               = 0.,
        recurrent_dropout     = 0.,
        return_sequences      = False,
        return_state          = False,
        go_backwards          = False,
        stateful              = False,
        unroll                = False,
        **kwargs
    ):

        super(SimpleComplexRNNCell, self).__init__(**kwargs)

        self.units      = units
        self.activation = activations.get(activation)
        self.use_bias   = use_bias

        self.kernel_initializer    = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer      = initializers.get(bias_initializer)

        self.kernel_regularizer    = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer      = regularizers.get(bias_regularizer)

        self.kernel_constraint    = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint      = constraints.get(bias_constraint)

        self.dropout           = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.state_size        = self.units
        self.output_size       = self.units
Example 26
    def build(self, input_shape):
        super(PruneLowMagnitude, self).build(input_shape)

        weight_vars, mask_vars, threshold_vars = [], [], []

        self.prunable_weights = self.layer.get_prunable_weights()

        # For each of the prunable weights, add mask and threshold variables
        for weight in self.prunable_weights:
            mask = self.add_variable(
                'mask',
                shape=weight.shape,
                initializer=initializers.get('ones'),
                dtype=weight.dtype,
                trainable=False,
                aggregation=tf_variables.VariableAggregation.MEAN)
            threshold = self.add_variable(
                'threshold',
                shape=[],
                initializer=initializers.get('zeros'),
                dtype=weight.dtype,
                trainable=False,
                aggregation=tf_variables.VariableAggregation.MEAN)

            weight_vars.append(weight)
            mask_vars.append(mask)
            threshold_vars.append(threshold)
        self.pruning_vars = list(zip(weight_vars, mask_vars, threshold_vars))

        # Add a scalar tracking the number of updates to the wrapped layer.
        self.pruning_step = self.add_variable(
            'pruning_step',
            shape=[],
            initializer=initializers.Constant(-1),
            dtype=dtypes.int64,
            trainable=False)

        def training_step_fn():
            return self.pruning_step

        # Create a pruning object
        self.pruning_obj = pruning_impl.Pruning(
            training_step_fn=training_step_fn,
            pruning_vars=self.pruning_vars,
            pruning_schedule=self.pruning_schedule,
            block_size=self.block_size,
            block_pooling_type=self.block_pooling_type)
Example 27
def _get_initializer(name):
    if name in ['vs_fan_avg_uniform', 'trfk_init']:
        return initializers.VarianceScaling(mode='fan_avg',
                                            distribution='uniform')
    elif name == 'vs_fan_avg_normal':
        return initializers.VarianceScaling(mode='fan_avg',
                                            distribution='normal')
    return initializers.get(name)
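An illustration of how this helper resolves names (hypothetical calls; any name other than the two custom aliases falls through to initializers.get):

init_a = _get_initializer('vs_fan_avg_uniform')  # VarianceScaling(mode='fan_avg', distribution='uniform')
init_b = _get_initializer('vs_fan_avg_normal')   # VarianceScaling(mode='fan_avg', distribution='normal')
init_c = _get_initializer('glorot_uniform')      # falls through to initializers.get('glorot_uniform')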
Example 28
    def __init__(self,
                 nb_filter,
                 n_atom_features,
                 batch_size,
                 init='glorot_uniform',
                 activation='linear',
                 dropout=None,
                 max_deg=10,
                 min_deg=0,
                 **kwargs):
        """
        Parameters
        ----------
        nb_filter: int
          Number of convolutional filters.
        n_atom_features: int
          Number of features listed per atom.
        batch_size: int
          Batch size used by the layer.
        init: str, optional
          Weight initialization for filters.
        activation: str, optional
          Activation function applied after convolution.
        dropout: float, optional
          Dropout probability.
        max_deg: int, optional
          Maximum degree of atoms in molecules.
        min_deg: int, optional
          Minimum degree of atoms in molecules.
        """
        super(GraphConv_and_gather, self).__init__(**kwargs)
        self.n_atom_features = n_atom_features
        self.init = initializers.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.nb_filter = nb_filter  # Save number of filters
        self.dropout = dropout  # Save dropout params
        self.max_deg = max_deg
        self.min_deg = min_deg
        self.batch_size = batch_size
        # Is there a solid explanation here?
        self.nb_affine = 3 * max_deg + (2 - min_deg)
        self.n_atom_features = n_atom_features
        n_atom_features = self.n_atom_features

        self.beta_init = initializers.get('zero')
        self.gamma_init = initializers.get('one')
        self.epsilon = 1e-5
        self.momentum = 0.99
Example 29
    def __init__(self,
                 units,
                 k,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 **kwargs):

        super(Maxout, self).__init__(**kwargs)

        self.units = int(units) if not isinstance(units, int) else units
        self.k = int(k) if not isinstance(k, int) else k
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
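A maxout unit takes the element-wise maximum over k affine projections of the input. A sketch of a matching call(), assuming build() creates a kernel of shape (input_dim, units * k) and a bias of shape (units * k,) (the attribute names kernel and bias are assumptions, not shown in the original):

import tensorflow as tf

def maxout_call(layer, inputs):
    z = tf.matmul(inputs, layer.kernel) + layer.bias  # (batch, units * k)
    z = tf.reshape(z, (-1, layer.units, layer.k))     # (batch, units, k)
    return tf.reduce_max(z, axis=-1)                  # (batch, units)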
Example 30
 def test_initializer_v2_get(self):
   tf2_force_enabled = tf2._force_enable  # pylint: disable=protected-access
   try:
     tf2.enable()
     rn = initializers.get('random_normal')
     self.assertIn('init_ops_v2', rn.__class__.__module__)
   finally:
     tf2._force_enable = tf2_force_enabled  # pylint: disable=protected-access
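For reference, initializers.get accepts the usual Keras identifier forms (standard tf.keras behavior):

from tensorflow.keras import initializers

initializers.get('orthogonal')                  # string identifier -> Orthogonal instance
initializers.get({'class_name': 'Constant',
                  'config': {'value': 0.5}})    # config dict -> Constant(0.5)
initializers.get(initializers.Ones())           # instances/callables pass through
initializers.get(None)                          # None passes through as None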
Example 31
  def __init__(self,
               norm_axis=None,
               params_axis=-1,
               epsilon=1e-12,
               center=True,
               scale=True,
               beta_initializer='zeros',
               gamma_initializer='ones',
               beta_regularizer=None,
               gamma_regularizer=None,
               beta_constraint=None,
               gamma_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    super(LayerNormalization, self).__init__(
        name=name, trainable=trainable, **kwargs)
    if isinstance(norm_axis, list):
      self.norm_axis = norm_axis[:]
    elif isinstance(norm_axis, int):
      self.norm_axis = norm_axis
    elif norm_axis is None:
      self.norm_axis = None
    else:
      raise TypeError('norm_axis must be int or list or None, type given: %s'
                      % type(norm_axis))

    if isinstance(params_axis, list):
      self.params_axis = params_axis[:]
    elif isinstance(params_axis, int):
      self.params_axis = params_axis
    else:
      raise TypeError('params_axis must be int or list, type given: %s'
                      % type(params_axis))

    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)

    self.supports_masking = True
Example 32
    def __init__(self,
                 units,
                 activation=None,
                 use_bias=True,
                 scale=True,
                 scf_min=0.2,
                 scf_max=2.0,
                 dropconnect_prob=0.05,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )

        super(ScaledLinear, self).__init__(
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)

        # Save params
        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.dropconnect_prob = dropconnect_prob
        self.scale = scale
        self.scf_min = scf_min
        self.scf_max = scf_max
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.kwargs = kwargs

        # Initialize scaling factor
        self.scaler = ScalingFactor(scf_min=self.scf_min,
                                    scf_max=self.scf_max,
                                    name="scaling_factor")

        self.supports_masking = True
        self.input_spec = InputSpec(min_ndim=2)
Example 33
  def __init__(self,
               norm_axis=None,
               params_axis=-1,
               epsilon=1e-12,
               center=True,
               scale=True,
               beta_initializer='zeros',
               gamma_initializer='ones',
               beta_regularizer=None,
               gamma_regularizer=None,
               beta_constraint=None,
               gamma_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    super(LayerNormalization, self).__init__(
        name=name, trainable=trainable, **kwargs)
    if isinstance(norm_axis, list):
      self.norm_axis = norm_axis[:]
    elif isinstance(norm_axis, int):
      self.norm_axis = norm_axis
    elif norm_axis is None:
      self.norm_axis = None
    else:
      raise TypeError('norm_axis must be int or list or None, type given: %s'
                      % type(norm_axis))

    if isinstance(params_axis, list):
      self.params_axis = params_axis[:]
    elif isinstance(params_axis, int):
      self.params_axis = params_axis
    else:
      raise TypeError('params_axis must be int or list, type given: %s'
                      % type(params_axis))

    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)

    self.supports_masking = True
Example 34
    def __init__(self,
                 units,
                 tau=1.,
                 activation='tanh',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 **kwargs):
        super(SimpleCTRNNCell, self).__init__(**kwargs)

        self.units = units
        self.tau = tau
        self.taus = array_ops.constant(tau,
                                       dtype=self.dtype,
                                       shape=[self.units],
                                       name="taus")
        self.activation = activations.get(activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.state_size = (self.units, self.units, self.units)
        self.output_size = self.units
        self._dropout_mask = None
        self._recurrent_dropout_mask = None
Example 35
    def __init__(self,
                 units,
                 control_units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=1,
                 **kwargs):
        super(SCLSTMCell, self).__init__(**kwargs)
        self.units = units
        self.control_units = control_units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.implementation = implementation
        self.state_size = (self.units, self.units, self.control_units)
        self._dropout_mask = None
        self._recurrent_dropout_mask = None
Example 36
    def add_weight(self,
                   name=None,
                   shape=None,
                   trainable=None,
                   partitioner=None,
                   initializer=None,
                   regularizer=None,
                   constraint=None,
                   dtype=None,
                   use_wnorm=False,
                   use_resource=None,
                   **kwargs):
        if initializer is not None:
            self.initializers[name] = initializers.get(initializer)
        if regularizer is not None:
            self.regularizers[name] = regularizers.get(regularizer)
        if constraint is not None:
            self.constraints[name] = constraints.get(constraint)
        _initializer = self.get_initializer(name)

        if use_wnorm or (self.wnorm and (name in ['kernel', 'embedding']
                                         or name.endswith('kernel'))):
            if name in self.normalizers and self.normalizers[name] is not None:
                self.normalizers[name] = weight_normalizers.WeightNormalizer(
                    _initializer, next_layer=self.normalizers[name])
            else:
                self.normalizers[name] = weight_normalizers.WeightNormalizer(
                    _initializer)

        if self.use_wscale:
            _initializer = _WscaleInitializer(_initializer, lrmul=self.lrmul)
            self.initializers[name] = _initializer
            if name in self.normalizers and self.normalizers[name] is not None:
                self.normalizers[name] = weight_normalizers.WscaleNormalizer(
                    next_layer=self.normalizers[name],
                    lrmul=self.lrmul,
                    gain=self.gain)
            else:
                self.normalizers[name] = weight_normalizers.WscaleNormalizer(
                    lrmul=self.lrmul, gain=self.gain)
        if dtype is None:
            dtype = self.dtype or K.floatx()
        weight = super(DynastesBaseLayer,
                       self).add_weight(name=name,
                                        shape=shape,
                                        initializer=_initializer,
                                        regularizer=self.get_regularizer(name),
                                        trainable=trainable,
                                        constraint=self.get_constraint(name),
                                        partitioner=partitioner,
                                        use_resource=use_resource,
                                        dtype=dtype,
                                        **kwargs)
        if name in self.normalizers:
            if self.normalizers[name] is not None:
                self.normalizers[name].build(shape)
        self.weights_dict[name] = weight
        return weight
Example 37
    def __init__(self,
                 num_units,
                 gate_mod=None,
                 ngram=False,
                 no_feedback=False,
                 use_peepholes=False,
                 cell_clip=None,
                 initializer=None,
                 num_proj=None,
                 proj_clip=None,
                 num_unit_shards=None,
                 num_proj_shards=None,
                 forget_bias=1.0,
                 state_is_tuple=True,
                 layer_norm=False,
                 activation=None,
                 reuse=None,
                 name=None,
                 dtype=None,
                 **kwargs):

        super(LSTMCell_mod, self).__init__(_reuse=reuse,
                                           name=name,
                                           dtype=dtype,
                                           **kwargs)

        print("LSTM cell mode: {0}".format(gate_mod))

        # Inputs must be 2-dimensional.
        self.input_spec = base_layer.InputSpec(ndim=2)

        self._num_units = num_units
        self._gate_mod = gate_mod
        self._ngram = ngram
        self._no_feedback = no_feedback
        self._use_peepholes = use_peepholes
        self._cell_clip = cell_clip
        self._initializer = initializers.get(initializer)
        self._num_proj = num_proj
        self._proj_clip = proj_clip
        self._num_unit_shards = num_unit_shards
        self._num_proj_shards = num_proj_shards
        self._forget_bias = forget_bias
        self._state_is_tuple = state_is_tuple
        self._layer_norm = layer_norm
        if activation:
            self._activation = activations.get(activation)
        else:
            self._activation = math_ops.tanh

        if num_proj:
            self._state_size = (LSTMStateTuple(num_units, num_proj)
                                if state_is_tuple else num_units + num_proj)
            self._output_size = num_proj
        else:
            self._state_size = (LSTMStateTuple(num_units, num_units)
                                if state_is_tuple else 2 * num_units)
            self._output_size = num_units
Example 38
    def __init__(self,
                 axis=-1,
                 momentum=0.9,
                 epsilon=1e-5,
                 m_per_group=0,
                 affine=True,
                 beta_initializer='zeros',
                 gamma_initializer='ones',
                 moving_mean_initializer='zeros',
                 moving_projection_initializer='identity',
                 beta_regularizer=None,
                 gamma_regularizer=None,
                 beta_constraint=None,
                 gamma_constraint=None,
                 trainable=True,
                 name=None,
                 **kwargs):

        super(DecorrelatedBN, self).__init__(name=name,
                                             trainable=trainable,
                                             **kwargs)
        if isinstance(axis, int):
            self.axis = axis
        else:
            raise TypeError('axis must be int, type given: %s' %
                            type(axis))
        self.momentum = momentum
        self.epsilon = epsilon
        self.m_per_group = m_per_group
        self.affine = affine
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.moving_mean_initializer = initializers.get(
            moving_mean_initializer)
        self.moving_projection_initializer = initializers.get(
            moving_projection_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_constraint = constraints.get(gamma_constraint)

        self.moving_means = []
        self.moving_projections = []

        self._trainable_var = None
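Decorrelated batch normalization whitens activations rather than only standardizing them; the moving_means / moving_projections lists above hold running estimates of the per-group mean and whitening projection. A minimal NumPy sketch of the underlying ZCA whitening step (the single-group case, not this class's implementation):

import numpy as np

def zca_whiten(x, epsilon=1e-5):
    """Whiten a (batch, features) array: zero mean, ~identity covariance."""
    mean = x.mean(axis=0, keepdims=True)
    xc = x - mean
    cov = xc.T @ xc / x.shape[0]
    eigval, eigvec = np.linalg.eigh(cov)
    # ZCA projection: U diag(1/sqrt(lambda + eps)) U^T
    projection = eigvec @ np.diag(1.0 / np.sqrt(eigval + epsilon)) @ eigvec.T
    # A layer would update its moving mean/projection with these values here.
    return xc @ projection, mean, projection

x = np.random.randn(256, 8) @ np.random.randn(8, 8)   # correlated features
x_white, _, _ = zca_whiten(x)
print(np.round(np.cov(x_white, rowvar=False), 2))      # approximately the identity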
Example 39
    def __init__(self,
                 norm_method='std',
                 filter_size=61,
                 data_format=None,
                 activation=None,
                 use_bias=False,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        self.valid_modes = {'std', 'max', None, 'whole_image'}
        if norm_method not in self.valid_modes:
            raise ValueError('Invalid `norm_method`: "{}". '
                             'Use one of {}.'.format(norm_method,
                                                     self.valid_modes))
        if 'trainable' not in kwargs:
            kwargs['trainable'] = False
        super(ImageNormalization2D, self).__init__(
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=4)  # hardcoded for 2D data

        self.filter_size = filter_size
        self.norm_method = norm_method
        self.data_format = conv_utils.normalize_data_format(data_format)

        if self.data_format == 'channels_first':
            self.channel_axis = 1
        else:
            self.channel_axis = 3  # hardcoded for 2D data

        if isinstance(self.norm_method, str):
            self.norm_method = self.norm_method.lower()
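Local normalization of this kind is commonly implemented by subtracting a mean computed over a filter_size x filter_size window and rescaling by a standard deviation. A rough SciPy sketch of that idea for a single-channel, channels_last image (an illustration of the general technique, not necessarily this layer's exact formula):

import numpy as np
from scipy import ndimage

def local_std_normalize(image, filter_size=61, epsilon=1e-7):
    """Subtract a local box-filter mean, then scale by the image's std."""
    local_mean = ndimage.uniform_filter(image, size=filter_size, mode='reflect')
    centered = image - local_mean
    return centered / (image.std() + epsilon)

img = np.random.rand(256, 256).astype('float32')
print(local_std_normalize(img).shape)   # (256, 256)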
Example 40
    def __init__(
            self,
            input_rows,
            input_dim,
            output_dim,
            support,  # input_rows x input_rows
            activation=None,
            use_bias=False,
            kernel_initializer='glorot_uniform',
            bias_initializer='zeros',
            kernel_regularizer=None,
            bias_regularizer=None,
            kernel_constraint=None,
            bias_constraint=None,
            dropout=0.,
            sparse_inputs=False,
            featureless=False,
            model='gcn',
            perturbation=None,
            **kwargs):
        super(GraphConvolution, self).__init__(**kwargs)

        self.input_rows = input_rows
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.support = support

        self.activation = activation
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = dropout
        self.sparse_inputs = sparse_inputs
        self.featureless = featureless
        self.model = model
        self.perturbation = perturbation

        self.supports_masking = True
        self.input_spec = InputSpec(min_ndim=2)
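For context, a graph convolution of this kind computes activation(support @ X @ W (+ b)), where support is the normalized adjacency matrix passed to the constructor. A minimal NumPy sketch of that propagation rule, ignoring dropout, sparse inputs, the featureless path and the perturbation argument:

import numpy as np

def gcn_forward(support, x, kernel, bias=None, activation=np.tanh):
    """One GCN layer: aggregate neighbours, then apply a shared linear map."""
    out = support @ x @ kernel          # (input_rows, output_dim)
    if bias is not None:
        out = out + bias
    return activation(out)

rows, in_dim, out_dim = 5, 8, 4
support = np.eye(rows)                  # identity adjacency, just for the demo
x = np.random.randn(rows, in_dim)
kernel = np.random.randn(in_dim, out_dim)
print(gcn_forward(support, x, kernel).shape)   # (5, 4)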
Example 41
  def __init__(self,
               units,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               **kwargs):
    self.units = units
    cell_spec = collections.namedtuple('cell', 'state_size')
    self._cell = cell_spec(state_size=(self.units, self.units))
    super(CuDNNLSTM, self).__init__(
        return_sequences=return_sequences,
        return_state=return_state,
        go_backwards=go_backwards,
        stateful=stateful,
        **kwargs)

    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.unit_forget_bias = unit_forget_bias

    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)

    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
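A layer with this constructor is used like any other Keras recurrent layer; there is no activation argument because the cuDNN kernel fixes the activations. A hedged usage sketch, assuming a TensorFlow 1.x setup with a CUDA-capable GPU (in TF 2.x the equivalent lives under tf.compat.v1.keras.layers.CuDNNLSTM):

import tensorflow as tf

# Requires a CUDA GPU at run time; construction alone does not.
model = tf.keras.Sequential([
    tf.keras.layers.CuDNNLSTM(64, return_sequences=True,
                              input_shape=(100, 16)),   # (timesteps, features)
    tf.keras.layers.CuDNNLSTM(32),                      # last output only
    tf.keras.layers.Dense(1),
])
model.compile(optimizer='adam', loss='mse')
model.summary()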
Example 42
  def __init__(self,
               alpha_initializer='zeros',
               alpha_regularizer=None,
               alpha_constraint=None,
               shared_axes=None,
               **kwargs):
    super(PReLU, self).__init__(**kwargs)
    self.supports_masking = True
    self.alpha_initializer = initializers.get(alpha_initializer)
    self.alpha_regularizer = regularizers.get(alpha_regularizer)
    self.alpha_constraint = constraints.get(alpha_constraint)
    if shared_axes is None:
      self.shared_axes = None
    elif not isinstance(shared_axes, (list, tuple)):
      self.shared_axes = [shared_axes]
    else:
      self.shared_axes = list(shared_axes)
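The shared_axes handling above matters mostly for convolutional features: sharing the learned alpha across the spatial axes gives one slope per channel instead of one per pixel. A short usage sketch with the standard Keras PReLU, which follows the same signature:

import tensorflow as tf

# One alpha per channel: share the parameter over the height and width axes.
prelu = tf.keras.layers.PReLU(shared_axes=[1, 2])

x = tf.random.normal([8, 32, 32, 16])   # (batch, height, width, channels)
y = prelu(x)
print(prelu.alpha.shape)                # (1, 1, 16)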
Example 43
  def add_weight(self,
                 name,
                 shape,
                 dtype=None,
                 initializer="zeros",
                 trainable=None,
                 synchronization=tf_variables.VariableSynchronization.AUTO,
                 aggregation=tf_variables.VariableAggregation.NONE):

    if dtype is None:
      dtype = dtypes.float32
    if isinstance(initializer, six.string_types) or callable(initializer):
      initializer = initializers.get(initializer)

    if synchronization == tf_variables.VariableSynchronization.ON_READ:
      if trainable:
        raise ValueError(
            "Synchronization value can be set to "
            "VariableSynchronization.ON_READ only for non-trainable variables. "
            "You have specified trainable=True and "
            "synchronization=VariableSynchronization.ON_READ.")
      else:
        # Set trainable to be false when variable is to be synced on read.
        trainable = False
    elif trainable is None:
      trainable = True

    variable = self._add_variable_with_custom_getter(
        name=name,
        shape=shape,
        getter=base_layer_utils.make_variable,
        overwrite=True,
        initializer=initializer,
        dtype=dtype,
        trainable=trainable,
        use_resource=True,
        synchronization=synchronization,
        aggregation=aggregation)
    backend.track_variable(variable)

    return variable
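This add_weight variant (note the synchronization/aggregation arguments and the forced trainable=False for ON_READ variables) is the pattern used for metric state, which must be readable across replicas. A hedged sketch of a custom metric built on the public tf.keras.metrics.Metric API, whose add_weight defaults to ON_READ synchronization and SUM aggregation (TF 2.x, eager execution assumed):

import tensorflow as tf

class SumOfSquares(tf.keras.metrics.Metric):
    """Accumulates sum(y_pred**2) across batches; illustrative only."""

    def __init__(self, name='sum_of_squares', **kwargs):
        super(SumOfSquares, self).__init__(name=name, **kwargs)
        # Metric state: non-trainable, synced on read, summed across replicas.
        self.total = self.add_weight(name='total', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        self.total.assign_add(tf.reduce_sum(tf.square(y_pred)))

    def result(self):
        return self.total

m = SumOfSquares()
m.update_state(None, tf.constant([1.0, 2.0]))
print(m.result().numpy())   # 5.0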
Example 44
  def __init__(self,
               axis=-1,
               momentum=0.99,
               epsilon=1e-3,
               center=True,
               scale=True,
               beta_initializer='zeros',
               gamma_initializer='ones',
               moving_mean_initializer='zeros',
               moving_variance_initializer='ones',
               beta_regularizer=None,
               gamma_regularizer=None,
               beta_constraint=None,
               gamma_constraint=None,
               renorm=False,
               renorm_clipping=None,
               renorm_momentum=0.99,
               fused=None,
               trainable=True,
               virtual_batch_size=None,
               adjustment=None,
               name=None,
               **kwargs):
    super(BatchNormalizationBase, self).__init__(
        name=name, trainable=trainable, **kwargs)
    if isinstance(axis, list):
      self.axis = axis[:]
    elif isinstance(axis, int):
      self.axis = axis
    else:
      raise TypeError('axis must be int or list, type given: %s'
                      % type(axis))
    self.momentum = momentum
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.moving_mean_initializer = initializers.get(moving_mean_initializer)
    self.moving_variance_initializer = initializers.get(
        moving_variance_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)
    self.renorm = renorm
    self.virtual_batch_size = virtual_batch_size
    self.adjustment = adjustment
    if self._USE_V2_BEHAVIOR:
      if fused:
        self._raise_if_fused_cannot_be_used()
      # We leave fused as None if self._fused_can_be_used()==True, since we
      # still may set it to False in self.build() if the input rank is not 4.
      elif fused is None and not self._fused_can_be_used():
        fused = False
    elif fused is None:
      fused = True
    self.supports_masking = True

    self.fused = fused
    self._bessels_correction_test_only = True

    if renorm:
      renorm_clipping = renorm_clipping or {}
      keys = ['rmax', 'rmin', 'dmax']
      if set(renorm_clipping) - set(keys):
        raise ValueError('renorm_clipping %s contains keys not in %s' %
                         (renorm_clipping, keys))
      self.renorm_clipping = renorm_clipping
      self.renorm_momentum = renorm_momentum
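The renorm_clipping check above accepts only the keys rmax, rmin and dmax, which bound the batch-renormalization correction terms r and d. A short usage sketch with the public tf.keras.layers.BatchNormalization, which exposes the same renorm arguments:

import tensorflow as tf

# Batch renormalization with clipped correction terms.
bn = tf.keras.layers.BatchNormalization(
    renorm=True,
    renorm_clipping={'rmax': 3.0, 'rmin': 1.0 / 3.0, 'dmax': 5.0},
    renorm_momentum=0.99)

x = tf.random.normal([32, 10])
y = bn(x, training=True)   # uses batch stats and updates the moving averages
print(y.shape)             # (32, 10)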