Example #1
def test_elu():
    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.elu(x, 0.5)])

    test_values = get_standard_values()
    result = f([test_values])[0]
    assert_allclose(result, test_values, rtol=1e-05)

    negative_values = np.array([[-1, -2]], dtype=K.floatx())
    # CNTK can't rebind the input shape, so recreate the function to test a different batch size
    if K.backend() == 'cntk':
        x2 = K.placeholder(ndim=2)
        f = K.function([x2], [activations.elu(x2, 0.5)])
    result = f([negative_values])[0]
    true_result = (np.exp(negative_values) - 1) / 2

    assert_allclose(result, true_result)
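For reference, the expected output used above follows from the usual ELU definition: elu(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise, so with alpha = 0.5 the negative inputs map to (exp(x) - 1) / 2. A minimal NumPy sketch of that reference (the name elu_reference is introduced here for illustration and is not part of the test suite):

import numpy as np

def elu_reference(x, alpha=0.5):
    # Piecewise ELU: identity for positive inputs, alpha * (exp(x) - 1) otherwise.
    x = np.asarray(x, dtype=float)
    return np.where(x > 0, x, alpha * (np.exp(x) - 1))

# Matches true_result = (np.exp(negative_values) - 1) / 2 from the test above.
print(elu_reference(np.array([[-1.0, -2.0]])))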
Example #2
def selu(x):
    """Scaled Exponential Linear Unit. (Klambauer et al., 2017)
       # Arguments
           x: A tensor or variable to compute the activation function for.
       # References
           - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
       """
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    return scale * elu(x, alpha)
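With the ELU definition above, selu is the piecewise function scale * x for x > 0 and scale * alpha * (exp(x) - 1) otherwise. A small NumPy sketch of that closed form (selu_reference is a name introduced here for illustration, not part of Keras):

import numpy as np

ALPHA = 1.6732632423543772848170429916717
SCALE = 1.0507009873554804934193349852946

def selu_reference(x):
    # scale * elu(x, alpha), written out as its piecewise definition.
    x = np.asarray(x, dtype=float)
    return np.where(x > 0, SCALE * x, SCALE * ALPHA * (np.exp(x) - 1))

print(selu_reference(np.array([-2.0, -0.5, 0.0, 1.5])))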
Example #3
def test_elu(self):
    x = backend.placeholder(ndim=2)
    f = backend.function([x], [activations.elu(x, 0.5)])
    test_values = np.random.random((2, 5))
    result = f([test_values])[0]
    self.assertAllClose(result, test_values, rtol=1e-05)
    negative_values = np.array([[-1, -2]], dtype=backend.floatx())
    result = f([negative_values])[0]
    true_result = (np.exp(negative_values) - 1) / 2
    self.assertAllClose(result, true_result)
Example #4
def test_elu():
    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.elu(x, 0.5)])

    test_values = get_standard_values()
    result = f([test_values])[0]
    assert_allclose(result, test_values, rtol=1e-05)

    negative_values = np.array([[-1, -2]], dtype=K.floatx())
    result = f([negative_values])[0]
    true_result = (np.exp(negative_values) - 1) / 2

    assert_allclose(result, true_result)
Example #5
def selu(x):
    """
    Scaled Exponential Linear Unit. (Klambauer et al., 2017)

    # References
        - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)

    :param x: a tensor or variable to compute the activation function for
    :return: the SELU activation, scale_ * elu(x, alpha)
    """
    from keras.activations import elu
    alpha = 1.6732632423543772848170429916717
    scale_ = 1.0507009873554804934193349852946

    return scale_ * elu(x, alpha)
Example #6
def eluu(x):
    return elu(x, alpha=2)
Example #7
def selu(x):
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    return scale * elu(x, alpha)
Example #8
import numpy as np
import matplotlib.pyplot as plt
from keras.activations import elu  # note: shadowed by the local elu definitions below
'''
# Earlier loop-based version, kept for reference:
def elu(x, a=1):
    y_list = []
    for xi in x:
        if xi > 0:
            y = xi
        else:
            y = a * (np.exp(xi) - 1)
        y_list.append(y)
    return y_list
'''


def elu(x, a=1):
    # ELU: identity for positive inputs, a * (exp(x) - 1) otherwise, applied elementwise.
    return list(map(lambda x: x if x > 0 else a * (np.exp(x) - 1), x))


x = np.arange(-5, 5, 0.1)
y = elu(x)

plt.plot(x, y)
plt.grid()
plt.show()
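The map-based elu above returns a plain Python list; an equivalent vectorized variant using np.where (a sketch with the same a parameter, not from the original script) keeps the result as a NumPy array and plots identically:

import numpy as np

def elu_vectorized(x, a=1):
    # Same piecewise rule as the map-based version, evaluated elementwise on the array.
    x = np.asarray(x, dtype=float)
    return np.where(x > 0, x, a * (np.exp(x) - 1))

# Drop-in replacement for the call above: y = elu_vectorized(np.arange(-5, 5, 0.1))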
Example #9
## nonlinearities: add magic constants
def gelu(x):
    # Exact (erf-based) GELU: x * Phi(x), where Phi is the standard normal CDF.
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.sqrt(2.0)))
    return x * cdf
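
# A guess at where these magic constants come from (an assumption, not stated in this
# snippet): they match variance-preserving gains of the form 1 / std(f(z)) for
# z ~ N(0, 1), so that each scaled nonlinearity roughly keeps unit variance on
# standard-normal inputs. A quick sanity check for relu:
#
#     z = tf.random.normal([1000000])
#     print(1.0 / tf.math.reduce_std(tf.nn.relu(z)))  # ~1.71, close to the constant below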


nonlinearities = {
    'identity':
    Lambda(lambda x: x),
    'celu':
    Lambda(lambda x: tf.nn.crelu(x) * 1.270926833152771),
    'elu':
    Lambda(lambda x: elu(x) * 1.2716004848480225),
    'gelu':
    Lambda(lambda x: gelu(x) * 1.7015043497085571),
    #     'glu': lambda x: jax.nn.glu(x) * 1.8484294414520264,
    'leaky_relu':
    Lambda(lambda x: tf.nn.leaky_relu(x) * 1.70590341091156),
    'log_sigmoid':
    Lambda(lambda x: tf.math.log(tf.nn.sigmoid(x)) * 1.9193484783172607),
    'log_softmax':
    Lambda(lambda x: tf.math.log(tf.nn.softmax(x)) * 1.0002083778381348),
    'relu':
    Lambda(lambda x: relu(x) * 1.7139588594436646),
    'relu6':
    Lambda(lambda x: tf.nn.relu6(x) * 1.7131484746932983),
    'selu':
    Lambda(lambda x: selu(x) * 1.0008515119552612),