Example #1
import numpy as np

# Tensor is the autograd tensor class under test; its import is omitted in the source snippet.
def test_reduce_sum_mix():
    x1_val = 2 * np.ones(3)
    x1 = Tensor(x1_val)
    y = x1.sum().exp()
    expected_y_val = np.exp(np.sum(x1_val))
    assert np.array_equal(y.data, expected_y_val)
    y.backward()
    assert np.array_equal(x1.grad, expected_y_val * np.ones_like(x1_val))
    x1.zero_grad()  # clear the accumulated gradient before the second backward pass
    y2 = x1.sum().log()
    expected_y2_val = np.log(np.sum(x1_val))
    assert np.array_equal(y2.data, expected_y2_val)
    y2.backward()
    assert np.array_equal(x1.grad, (1 / np.sum(x1_val)) * np.ones_like(x1_val))
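The expected gradients in this test follow from the chain rule: for y = exp(sum(x)) each partial derivative is exp(sum(x)), and for y = log(sum(x)) it is 1 / sum(x). The plain-NumPy check below is independent of the Tensor class; numeric_grad is a helper defined here only for illustration, confirming those values with central finite differences.

import numpy as np

def numeric_grad(f, x, eps=1e-6):
    # Central finite differences: (f(x + eps*e_i) - f(x - eps*e_i)) / (2*eps) per component.
    grad = np.zeros_like(x, dtype=float)
    for i in range(x.size):
        step = np.zeros_like(x, dtype=float)
        step[i] = eps
        grad[i] = (f(x + step) - f(x - step)) / (2 * eps)
    return grad

x = 2 * np.ones(3)
# d/dx_i exp(sum(x)) = exp(sum(x)) in every component.
assert np.allclose(numeric_grad(lambda v: np.exp(np.sum(v)), x),
                   np.exp(np.sum(x)) * np.ones_like(x), atol=1e-4)
# d/dx_i log(sum(x)) = 1 / sum(x) in every component.
assert np.allclose(numeric_grad(lambda v: np.log(np.sum(v)), x),
                   (1 / np.sum(x)) * np.ones_like(x), atol=1e-8)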
Example #2
import numpy as np

def test_reduce_sum():
    x1_val = 2 * np.ones(3)
    x1 = Tensor(x1_val)
    y = x1.sum()
    assert np.array_equal(y.data, np.sum(x1_val))
    y.backward()
    assert np.array_equal(x1.grad, np.ones_like(x1_val))
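For context, here is a minimal sketch of how a tensor class could provide the sum/backward behaviour this test checks: sum records a backward function that broadcasts the upstream gradient back to every input element. MiniTensor below is a hypothetical illustration written for this note, not the Tensor implementation under test.

import numpy as np

class MiniTensor:
    # Minimal, hypothetical sketch of the interface these tests rely on
    # (data, grad, sum(), backward()); not the implementation under test.
    def __init__(self, data):
        self.data = np.asarray(data, dtype=float)
        self.grad = np.zeros_like(self.data)
        self._backward_fn = None  # set by the op that produced this tensor

    def sum(self):
        out = MiniTensor(np.sum(self.data))
        def backward_fn(upstream):
            # d(sum(x))/dx_i = 1, so the upstream gradient is broadcast to every element.
            self.grad = self.grad + upstream * np.ones_like(self.data)
        out._backward_fn = backward_fn
        return out

    def backward(self):
        # Seed with dy/dy = 1 and propagate one step back (enough for y = x.sum()).
        if self._backward_fn is not None:
            self._backward_fn(1.0)

x = MiniTensor(2 * np.ones(3))
y = x.sum()
y.backward()
assert np.array_equal(y.data, 6.0)
assert np.array_equal(x.grad, np.ones(3))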
Example #3
import numpy as np

def test_mix_all():
    x1_val = 2 * np.ones(3)
    x1 = Tensor(x1_val)
    y = 1 / (1 + (-x1.sum()).exp())
    expected_y_val = 1 / (1 + np.exp(-np.sum(x1_val)))
    expected_y_grad = expected_y_val * (1 - expected_y_val) * np.ones_like(x1_val)
    assert np.array_equal(y.data, expected_y_val)
    y.backward()
    assert np.sum(np.abs(x1.grad - expected_y_grad)) < 1E-10
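The expected gradient here comes from the sigmoid derivative: with y = sigmoid(s) and s = sum(x), each partial derivative is sigmoid(s) * (1 - sigmoid(s)). A quick NumPy finite-difference check of that identity, independent of the Tensor class:

import numpy as np

def sigmoid_of_sum(v):
    return 1 / (1 + np.exp(-np.sum(v)))

x = 2 * np.ones(3)
s = sigmoid_of_sum(x)
eps = 1e-6
numeric = np.array([
    (sigmoid_of_sum(x + eps * np.eye(3)[i]) - sigmoid_of_sum(x - eps * np.eye(3)[i])) / (2 * eps)
    for i in range(3)
])
# Analytic gradient of sigmoid(sum(x)): s * (1 - s) in every component.
assert np.allclose(numeric, s * (1 - s) * np.ones(3), atol=1e-8)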