Example 1
def selu(x):
  """Scaled Exponential Linear Unit (SELU).

  SELU is equal to: `scale * elu(x, alpha)`, where alpha and scale
  are pre-defined constants. The values of `alpha` and `scale` are
  chosen so that the mean and variance of the inputs are preserved
  between two consecutive layers as long as the weights are initialized
  correctly (see `lecun_normal` initialization) and the number of inputs
  is "large enough" (see references for more information).

  Arguments:
      x: A tensor or variable to compute the activation function for.

  Returns:
      The scaled exponential unit activation: `scale * elu(x, alpha)`.

  # Note
      - To be used together with the initialization "lecun_normal".
      - To be used together with the dropout variant "AlphaDropout".

  References:
      - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
  """
  alpha = 1.6732632423543772848170429916717
  scale = 1.0507009873554804934193349852946
  return scale * K.elu(x, alpha)
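The closed form above can be made concrete with a small reference implementation. The following NumPy sketch is illustrative only and not part of the Keras source; `selu_reference` and the module-level constants are names introduced here, with the constants copied from the function body above:

```python
import numpy as np

# Constants copied from the selu() body above.
ALPHA = 1.6732632423543772848170429916717
SCALE = 1.0507009873554804934193349852946

def selu_reference(x):
    """Piecewise SELU: scale * x if x > 0, else scale * alpha * (exp(x) - 1)."""
    x = np.asarray(x, dtype=np.float64)
    return np.where(x > 0, SCALE * x, SCALE * ALPHA * np.expm1(x))

# Matches the `scale * elu(x, alpha)` formulation used by the function above.
print(selu_reference([-2.0, -0.5, 0.0, 0.5, 2.0]))
```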
Example 2
def selu(x):
    """Scaled Exponential Linear Unit (SELU).

  SELU is equal to: `scale * elu(x, alpha)`, where alpha and scale
  are pre-defined constants. The values of `alpha` and `scale` are
  chosen so that the mean and variance of the inputs are preserved
  between two consecutive layers as long as the weights are initialized
  correctly (see `lecun_normal` initialization) and the number of inputs
  is "large enough" (see references for more information).

  Arguments:
      x: A tensor or variable to compute the activation function for.

  Returns:
      The scaled exponential unit activation: `scale * elu(x, alpha)`.

  # Note
      - To be used together with the initialization "lecun_normal".
      - To be used together with the dropout variant "AlphaDropout".

  References:
      - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
  """
  alpha = 1.6732632423543772848170429916717
  scale = 1.0507009873554804934193349852946
  return scale * K.elu(x, alpha)
Example 3
def selu(x):
    """Scaled Exponential Linear Unit (SELU).

  The Scaled Exponential Linear Unit (SELU) activation function is:
  `scale * x` if `x > 0` and `scale * alpha * (exp(x) - 1)` if `x < 0`
  where `alpha` and `scale` are pre-defined constants
  (`alpha = 1.67326324`
  and `scale = 1.05070098`).
  The SELU activation function multiplies `scale` (> 1) with the output of
  the [elu](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/activations/elu)
  (Exponential Linear Unit, ELU) activation to ensure a slope larger than
  one for positive net inputs.

  The values of `alpha` and `scale` are
  chosen so that the mean and variance of the inputs are preserved
  between two consecutive layers as long as the weights are initialized
  correctly (see
  [`lecun_normal` initialization](https://www.tensorflow.org/api_docs/python/tf/keras/initializers/lecun_normal))
  and the number of inputs is "large enough"
  (see references for more information).

  ![](https://cdn-images-1.medium.com/max/1600/1*m0e8lZU_Zrkh4ESfQkY2Pw.png)
  (Courtesy: Blog on Towards DataScience at
  https://towardsdatascience.com/selu-make-fnns-great-again-snn-8d61526802a9)

  Example Usage:

  >>> import tensorflow as tf
  >>> from tensorflow.python.keras.layers import Dense
  >>> n_classes = 10  # 10-class problem
  >>> model = tf.keras.Sequential()
  >>> model.add(Dense(64, kernel_initializer='lecun_normal',
  ...                 activation='selu', input_shape=(28, 28, 1)))
  >>> model.add(Dense(32, kernel_initializer='lecun_normal',
  ...                 activation='selu'))
  >>> model.add(Dense(16, kernel_initializer='lecun_normal',
  ...                 activation='selu'))
  >>> model.add(Dense(n_classes, activation='softmax'))

  Arguments:
      x: A tensor or variable to compute the activation function for.

  Returns:
      The scaled exponential unit activation: `scale * elu(x, alpha)`.

  # Note
      - To be used together with the initialization
        "[lecun_normal](https://www.tensorflow.org/api_docs/python/tf/keras/initializers/lecun_normal)".
      - To be used together with the dropout variant
        "[AlphaDropout](https://www.tensorflow.org/api_docs/python/tf/keras/layers/AlphaDropout)".

  References:
      - [Self-Normalizing Neural Networks (Klambauer et al., 2017)](https://arxiv.org/abs/1706.02515)
  """
  alpha = 1.6732632423543772848170429916717
  scale = 1.0507009873554804934193349852946
  return scale * K.elu(x, alpha)
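The self-normalizing property described in the docstring (mean and variance preserved across consecutive layers when weights follow `lecun_normal` and the fan-in is large) can be checked numerically. Below is a minimal NumPy simulation, written for this listing and not taken from the Keras source; the width, depth, and sample count are arbitrary assumptions:

```python
import numpy as np

ALPHA = 1.6732632423543772848170429916717
SCALE = 1.0507009873554804934193349852946

def selu_np(x):
    # Piecewise SELU, equivalent to scale * elu(x, alpha).
    return np.where(x > 0, SCALE * x, SCALE * ALPHA * np.expm1(x))

rng = np.random.default_rng(0)
width, depth = 256, 10
x = rng.standard_normal((2000, width))  # inputs with ~zero mean, unit variance

for _ in range(depth):
    # lecun_normal draws weights from N(0, 1 / fan_in).
    w = rng.normal(0.0, np.sqrt(1.0 / width), size=(width, width))
    x = selu_np(x @ w)

# After many layers the activations still have roughly zero mean and unit variance.
print(x.mean(), x.std())
```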
Example 4
def selu(x):
  """Scaled Exponential Linear Unit (SELU).

  The Scaled Exponential Linear Unit (SELU) activation function is:
  `scale * x` if `x > 0` and `scale * alpha * (exp(x) - 1)` if `x < 0`
  where `alpha` and `scale` are pre-defined constants
  (`alpha = 1.67326324`
  and `scale = 1.05070098`).
  The SELU activation function multiplies `scale` (> 1) with the output of
  the [elu](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/activations/elu)
  (Exponential Linear Unit, ELU) activation to ensure a slope larger than
  one for positive net inputs.

  The values of `alpha` and `scale` are
  chosen so that the mean and variance of the inputs are preserved
  between two consecutive layers as long as the weights are initialized
  correctly (see
  [`lecun_normal` initialization](https://www.tensorflow.org/api_docs/python/tf/keras/initializers/lecun_normal))
  and the number of inputs is "large enough"
  (see references for more information).

  ![](https://cdn-images-1.medium.com/max/1600/1*m0e8lZU_Zrkh4ESfQkY2Pw.png)
  (Courtesy: Blog on Towards DataScience at
  https://towardsdatascience.com/selu-make-fnns-great-again-snn-8d61526802a9)

  Example Usage:
  ```python
  from tensorflow.keras import models
  from tensorflow.keras.layers import Dense

  n_classes = 10  # 10-class problem
  model = models.Sequential()
  model.add(Dense(64, kernel_initializer='lecun_normal', activation='selu',
                  input_shape=(28, 28, 1)))
  model.add(Dense(32, kernel_initializer='lecun_normal', activation='selu'))
  model.add(Dense(16, kernel_initializer='lecun_normal', activation='selu'))
  model.add(Dense(n_classes, activation='softmax'))
  ```

  Arguments:
      x: A tensor or variable to compute the activation function for.

  Returns:
      The scaled exponential unit activation: `scale * elu(x, alpha)`.

  # Note
      - To be used together with the initialization
        "[lecun_normal](https://www.tensorflow.org/api_docs/python/tf/keras/initializers/lecun_normal)".
      - To be used together with the dropout variant
        "[AlphaDropout](https://www.tensorflow.org/api_docs/python/tf/keras/layers/AlphaDropout)".

  References:
      - [Self-Normalizing Neural Networks (Klambauer et al., 2017)](https://arxiv.org/abs/1706.02515)
  """
  alpha = 1.6732632423543772848170429916717
  scale = 1.0507009873554804934193349852946
  return scale * K.elu(x, alpha)
Example 5
def elu(x, alpha=1.0):
    """Exponential linear unit.

  Arguments:
      x: Input tensor.
      alpha: A scalar, slope of negative section.

  Returns:
      The exponential linear activation: `x` if `x > 0` and
        `alpha * (exp(x)-1)` if `x < 0`.

  Reference:
      - [Clevert et al. 2016](https://arxiv.org/abs/1511.07289)
  """
  return K.elu(x, alpha)
Example 6
def elu(x, alpha=1.0):
    """Exponential linear unit.

  Arguments:
      x: Input tensor.
      alpha: A scalar, slope of negative section.

  Returns:
      The exponential linear activation: `x` if `x > 0` and
        `alpha * (exp(x)-1)` if `x < 0`.

  Reference:
      - [Fast and Accurate Deep Network Learning by Exponential
        Linear Units (ELUs)](https://arxiv.org/abs/1511.07289)
  """
  return K.elu(x, alpha)
Example 7
def elu(x, alpha=1.0):
  """Exponential linear unit.

  Arguments:
      x: Input tensor.
      alpha: A scalar, slope of negative section.

  Returns:
      The exponential linear activation: `x` if `x > 0` and
        `alpha * (exp(x)-1)` if `x < 0`.

  Reference:
      - [Fast and Accurate Deep Network Learning by Exponential
        Linear Units (ELUs)](https://arxiv.org/abs/1511.07289)
  """
  return K.elu(x, alpha)
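Since the `elu` docstrings above only state the piecewise formula, a short NumPy sketch (an illustration written for this listing, not the backend implementation; `elu_reference` is a name introduced here) shows how `alpha` sets the negative saturation value:

```python
import numpy as np

def elu_reference(x, alpha=1.0):
    """ELU as documented: x if x > 0, else alpha * (exp(x) - 1)."""
    x = np.asarray(x, dtype=np.float64)
    return np.where(x > 0, x, alpha * np.expm1(x))

x = np.array([-10.0, -2.0, -0.5, 0.0, 1.0, 3.0])
print(elu_reference(x, alpha=1.0))  # negative branch saturates towards -1.0
print(elu_reference(x, alpha=0.5))  # negative branch saturates towards -0.5
```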
Example 8
def elu(x, alpha=1.0):
    """Exponential Linear Unit.

  The exponential linear unit (ELU) with `alpha > 0` is:
  `x` if `x > 0` and
  `alpha * (exp(x) - 1)` if `x < 0`.
  The ELU hyperparameter `alpha` controls the value to which an
  ELU saturates for negative net inputs. ELUs diminish the
  vanishing gradient effect.

  ELUs have negative values which pushes the mean of the activations
  closer to zero.
  Mean activations that are closer to zero enable faster learning as they
  bring the gradient closer to the natural gradient.
  ELUs saturate to a negative value when the argument gets smaller.
  Saturation means a small derivative which decreases the variation
  and the information that is propagated to the next layer.

  Example Usage:

  >>> import tensorflow as tf
  >>> model = tf.keras.Sequential()
  >>> model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='elu',
  ...          input_shape=(28, 28, 1)))
  >>> model.add(tf.keras.layers.MaxPooling2D((2, 2)))
  >>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu'))
  >>> model.add(tf.keras.layers.MaxPooling2D((2, 2)))
  >>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu'))

  Args:
      x: Input tensor.
      alpha: A scalar, slope of negative section. `alpha` controls the value to
        which an ELU saturates for negative net inputs.

  Returns:
      The exponential linear unit (ELU) activation function: `x` if `x > 0` and
      `alpha * (exp(x) - 1)` if `x < 0`.


  Reference:
      [Fast and Accurate Deep Network Learning by Exponential Linear Units
      (ELUs) (Clevert et al, 2016)](https://arxiv.org/abs/1511.07289)
  """
  return backend.elu(x, alpha)
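The claim above that ELUs push mean activations closer to zero can be illustrated with a quick NumPy comparison against ReLU on zero-mean Gaussian pre-activations. This is an illustrative experiment only, not part of the Keras source; the sample size is an arbitrary choice:

```python
import numpy as np

def elu_np(x, alpha=1.0):
    # ELU per the docstring: x if x > 0, else alpha * (exp(x) - 1).
    return np.where(x > 0, x, alpha * np.expm1(x))

rng = np.random.default_rng(0)
z = rng.standard_normal(1_000_000)  # zero-mean pre-activations

relu_mean = np.maximum(z, 0.0).mean()  # about 0.40: ReLU discards the negative half
elu_mean = elu_np(z).mean()            # noticeably closer to zero

print(relu_mean, elu_mean)
```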
Example 9
def selu(x):
    """Scaled Exponential Linear Unit. (Klambauer et al., 2017).

  Arguments:
      x: A tensor or variable to compute the activation function for.

  Returns:
      Tensor with the same shape and dtype as `x`.

  # Note
      - To be used together with the initialization "lecun_normal".
      - To be used together with the dropout variant "AlphaDropout".

  """
  alpha = 1.6732632423543772848170429916717
  scale = 1.0507009873554804934193349852946
  return scale * K.elu(x, alpha)
Example 10
def selu(x):
  """Scaled Exponential Linear Unit. (Klambauer et al., 2017).

  Arguments:
      x: A tensor or variable to compute the activation function for.

  Returns:
      Tensor with the same shape and dtype as `x`.

  # Note
      - To be used together with the initialization "lecun_normal".
      - To be used together with the dropout variant "AlphaDropout".

  """
  alpha = 1.6732632423543772848170429916717
  scale = 1.0507009873554804934193349852946
  return scale * K.elu(x, alpha)
Example 11
def call(self, inputs):
    return K.elu(inputs, self.alpha)
Example 12
def call(self, inputs):
  return K.elu(inputs, self.alpha)
Example 13
def selu(x):
    # Scaled Exponential Linear Unit: scale * elu(x, alpha), with the fixed
    # constants alpha and scale from Klambauer et al. (2017).
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    return scale * K.elu(x, alpha)
Example 14
def elu(x, alpha=1.0):
    return K.elu(x, alpha)
Example 15
def call(self, inputs):
    return backend.elu(inputs, self.alpha)
Example 16
def elu(x, alpha=1.0):
  return K.elu(x, alpha)