Example #1
File: tests.py  Project: Yeahp/nebula
# Module-level imports assumed by all of the tests.py excerpts below.
# random_tensor and torch_gradient_generator are project test helpers
# (their definitions are not shown in these excerpts).
import numpy as np
import torch
import torch.nn.functional as F
from numpy.testing import assert_almost_equal
from scipy.special import expit


def test_sigmoid_activation(N=None):
    """Check Sigmoid.fn against scipy.special.expit on random inputs."""
    from activations import Sigmoid

    N = np.inf if N is None else N

    mine = Sigmoid()
    gold = expit

    i = 0
    while i < N:
        n_dims = np.random.randint(1, 100)
        z = random_tensor((1, n_dims))
        assert_almost_equal(mine.fn(z), gold(z))
        print("PASSED")
        i += 1
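
For context, mine.fn must agree elementwise with SciPy's expit. A minimal sketch of what the Sigmoid class in activations.py could look like, assuming the standard logistic form (hypothetical; the project's actual implementation may differ):

import numpy as np

class Sigmoid:
    def fn(self, z):
        # Logistic function: 1 / (1 + e^(-z))
        return 1.0 / (1.0 + np.exp(-z))

    def grad(self, z):
        # d/dz sigmoid(z) = sigmoid(z) * (1 - sigmoid(z))
        s = self.fn(z)
        return s * (1.0 - s)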
Example #2
File: tests.py  Project: Yeahp/nebula
def test_softplus_grad(N=None):
    """Check SoftPlus.grad against the autograd gradient of F.softplus."""
    from activations import SoftPlus

    N = np.inf if N is None else N

    mine = SoftPlus()
    gold = torch_gradient_generator(F.softplus)

    i = 0
    while i < N:
        n_ex = np.random.randint(1, 100)
        n_dims = np.random.randint(1, 100)
        z = random_tensor((n_ex, n_dims), standardize=True)
        assert_almost_equal(mine.grad(z), gold(z))
        print("PASSED")
        i += 1
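
torch_gradient_generator is a project helper whose definition is not shown in these excerpts. A plausible minimal sketch, assuming it wraps PyTorch autograd to produce an elementwise reference gradient (the real helper may differ). Note that the analytic softplus derivative is the logistic sigmoid, so a correct SoftPlus.grad reduces to expit(z).

import torch

def torch_gradient_generator(fn, **kwargs):
    # Return a function that computes d fn(z)/dz elementwise via autograd.
    def gradient(z):
        z = torch.tensor(z, requires_grad=True)
        fn(z, **kwargs).sum().backward()
        return z.grad.numpy()
    return gradient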
Example #3
File: tests.py  Project: Yeahp/nebula
def test_selu_grad(N=None):
    """Check SELU.grad against the autograd gradient of F.selu."""
    from activations import SELU

    N = np.inf if N is None else N

    mine = SELU()
    gold = torch_gradient_generator(F.selu)

    i = 0
    while i < N:
        n_ex = np.random.randint(1, 100)
        n_dims = np.random.randint(1, 100)
        z = random_tensor((n_ex, n_dims))
        assert_almost_equal(mine.grad(z), gold(z), decimal=6)
        print("PASSED")
        i += 1
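
For reference, the analytic SELU derivative being verified is scale for z > 0 and scale * alpha * e^z otherwise, with the fixed constants from Klambauer et al. (2017) that torch.nn.functional.selu also uses. An illustrative NumPy expression (helper name is hypothetical):

import numpy as np

SELU_ALPHA = 1.6732632423543772
SELU_SCALE = 1.0507009873554805

def selu_grad_reference(z):
    # d/dz SELU(z): scale for z > 0, scale * alpha * e^z otherwise.
    return np.where(z > 0, SELU_SCALE, SELU_SCALE * SELU_ALPHA * np.exp(z))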
Example #4
File: tests.py  Project: Yeahp/nebula
def test_tanh_grad(N=None):
    """Check Tanh.grad against the autograd gradient of torch.tanh."""
    from activations import Tanh

    N = np.inf if N is None else N

    mine = Tanh()
    gold = torch_gradient_generator(torch.tanh)

    i = 0
    while i < N:
        n_ex = np.random.randint(1, 100)
        n_dims = np.random.randint(1, 100)
        z = random_tensor((n_ex, n_dims))
        assert_almost_equal(mine.grad(z), gold(z))
        print("PASSED")
        i += 1
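
The identity under test here is d/dz tanh(z) = 1 - tanh(z)^2; an illustrative one-line NumPy reference (helper name is hypothetical):

import numpy as np

def tanh_grad_reference(z):
    # d/dz tanh(z) = 1 - tanh(z)^2
    return 1.0 - np.tanh(z) ** 2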
Example #5
File: tests.py  Project: Yeahp/nebula
def test_leakyrelu_grad(N=None):
    """Check LeakyReLU.grad against the autograd gradient of F.leaky_relu."""
    from activations import LeakyReLU

    N = np.inf if N is None else N

    i = 0
    while i < N:
        n_ex = np.random.randint(1, 10)
        n_dims = np.random.randint(1, 10)
        alpha = np.random.uniform(0, 10)
        z = random_tensor((n_ex, n_dims))

        mine = LeakyReLU(alpha)
        gold = torch_gradient_generator(F.leaky_relu, negative_slope=alpha)
        assert_almost_equal(mine.grad(z), gold(z), decimal=6)
        print("PASSED")
        i += 1
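
The expected LeakyReLU gradient is piecewise constant: 1 on the positive side and alpha elsewhere (the subgradient choice at exactly z = 0 is moot for continuous random inputs). An illustrative reference (helper name is hypothetical):

import numpy as np

def leaky_relu_grad_reference(z, alpha):
    # d/dz LeakyReLU(z): 1 for z > 0, alpha otherwise.
    return np.where(z > 0, 1.0, alpha)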
Example #6
File: tests.py  Project: Yeahp/nebula
def test_elu_activation(N=None):
    """Check ELU.fn against torch.nn.functional.elu."""
    from activations import ELU

    N = np.inf if N is None else N

    i = 0
    while i < N:
        n_dims = np.random.randint(1, 10)
        z = random_tensor((1, n_dims))

        alpha = np.random.uniform(0, 10)

        mine = ELU(alpha)
        gold = lambda z, a: F.elu(torch.from_numpy(z), a).numpy()

        assert_almost_equal(mine.fn(z), gold(z, alpha))
        print("PASSED")
        i += 1
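
For reference, ELU with parameter alpha is z for z > 0 and alpha * (e^z - 1) otherwise; an illustrative NumPy expression (helper name is hypothetical):

import numpy as np

def elu_reference(z, alpha):
    # ELU(z) = z for z > 0, alpha * (e^z - 1) otherwise.
    return np.where(z > 0, z, alpha * (np.exp(z) - 1.0))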