def Selu(self, x):
        '''Wraps Keras' implementation of the SELU activation function

        Arguments:
            x (tensor):
                Input tensor

        Returns:
            Tensor, output of the 'SELU' activation function
        '''
        return selu(x)
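For reference, SELU is defined as scale * x for positive inputs and scale * alpha * (exp(x) - 1) otherwise; the alpha and scale constants below are the same ones the tests in the later examples check against. A minimal NumPy sketch of that definition (a standalone reference, not the Keras implementation itself):

import numpy as np

ALPHA = 1.6732632423543772848170429916717
SCALE = 1.0507009873554804934193349852946

def selu_reference(x):
    # scale * x for x > 0, scale * alpha * (exp(x) - 1) for x <= 0
    x = np.asarray(x, dtype=np.float64)
    return SCALE * np.where(x > 0, x, ALPHA * (np.exp(x) - 1))

# Positive inputs are scaled linearly; negative inputs saturate towards -SCALE * ALPHA.
print(selu_reference(np.array([[1.0, 2.0], [-1.0, -2.0]])))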
Example #2
def test_selu():
    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.selu(x)])
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946

    positive_values = get_standard_values()
    result = f([positive_values])[0]
    assert_allclose(result, positive_values * scale, rtol=1e-05)

    negative_values = np.array([[-1, -2]], dtype=K.floatx())

    # cntk can't rebind the input shape, so create the model again to test different batch size
    if (K.backend() == 'cntk'):
        x2 = K.placeholder(ndim=2)
        f = K.function([x2], [activations.selu(x2)])

    result = f([negative_values])[0]
    true_result = (np.exp(negative_values) - 1) * scale * alpha

    assert_allclose(result, true_result)
Example #4
    def test_selu(self):
        x = backend.placeholder(ndim=2)
        f = backend.function([x], [activations.selu(x)])
        alpha = 1.6732632423543772848170429916717
        scale = 1.0507009873554804934193349852946

        positive_values = np.array([[1, 2]], dtype=backend.floatx())
        result = f([positive_values])[0]
        self.assertAllClose(result, positive_values * scale, rtol=1e-05)

        negative_values = np.array([[-1, -2]], dtype=backend.floatx())
        result = f([negative_values])[0]
        true_result = (np.exp(negative_values) - 1) * scale * alpha
        self.assertAllClose(result, true_result)
def test_selu():
    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.selu(x)])
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946

    positive_values = get_standard_values()
    result = f([positive_values])[0]
    assert_allclose(result, positive_values * scale, rtol=1e-05)

    negative_values = np.array([[-1, -2]], dtype=K.floatx())

    result = f([negative_values])[0]
    true_result = (np.exp(negative_values) - 1) * scale * alpha

    assert_allclose(result, true_result)
    def call(self, x):
        # Concatenated SELU: apply SELU to x and -x and stack the results
        # along the last axis, doubling the channel dimension.
        return K.concatenate((selu(x), selu(-x)), -1)
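The call() above applies SELU to both x and -x and concatenates the results along the last axis, so the output has twice as many channels as the input (the CReLU trick applied to SELU). A minimal sketch of how such a layer could be wrapped, assuming the tf.keras API; the ConcatSELU name is made up for illustration:

import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.activations import selu

class ConcatSELU(tf.keras.layers.Layer):
    # Output has twice the input's last dimension: [selu(x), selu(-x)].
    def call(self, x):
        return K.concatenate((selu(x), selu(-x)), -1)

# Usage sketch: a (batch, 4) input becomes a (batch, 8) output.
layer = ConcatSELU()
print(layer(tf.random.normal((2, 4))).shape)  # (2, 8)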
Example #8
    'celu': Lambda(lambda x: tf.nn.crelu(x) * 1.270926833152771),
    'elu': Lambda(lambda x: elu(x) * 1.2716004848480225),
    'gelu': Lambda(lambda x: gelu(x) * 1.7015043497085571),
    #     'glu': lambda x: jax.nn.glu(x) * 1.8484294414520264,
    'leaky_relu': Lambda(lambda x: tf.nn.leaky_relu(x) * 1.70590341091156),
    'log_sigmoid': Lambda(lambda x: tf.math.log(tf.nn.sigmoid(x)) * 1.9193484783172607),
    'log_softmax': Lambda(lambda x: tf.math.log(tf.nn.softmax(x)) * 1.0002083778381348),
    'relu': Lambda(lambda x: relu(x) * 1.7139588594436646),
    'relu6': Lambda(lambda x: tf.nn.relu6(x) * 1.7131484746932983),
    'selu': Lambda(lambda x: selu(x) * 1.0008515119552612),
    'sigmoid': Lambda(lambda x: sigmoid(x) * 4.803835391998291),
    'silu': Lambda(lambda x: tf.nn.silu(x) * 1.7881293296813965),
    'soft_sign': Lambda(lambda x: tf.nn.softsign(x) * 2.338853120803833),
    'softplus': Lambda(lambda x: softplus(x) * 1.9203323125839233),
    'tanh': Lambda(lambda x: tanh(x) * 1.5939117670059204),
}
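The multipliers above are not explained in the excerpt, but they look like empirically computed variance-preserving factors: roughly 1 / std(act(x)) for x drawn from a standard normal, so each scaled activation keeps approximately unit variance under Gaussian inputs (note that selu, which is already self-normalizing, gets a factor of nearly 1). A quick Monte Carlo check of that reading, using NumPy stand-ins for two of the entries:

import numpy as np

# Assumption (not stated in the excerpt): constant ~ 1 / std(activation(x)) for x ~ N(0, 1).
rng = np.random.default_rng(0)
x = rng.standard_normal(1_000_000)

def estimate_gamma(act):
    return 1.0 / act(x).std()

print(estimate_gamma(lambda z: np.maximum(z, 0.0)))  # relu: ~1.71, close to 1.7139... above
print(estimate_gamma(np.tanh))                       # tanh: ~1.59, close to 1.5939... above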