Example #1
import numpy as np

import autograd.optimizer  # assumed layout: the library under test, exposing SGD
from autograd import Variable  # assumed import path; the original snippet omits its imports


def test_variable_sum():
    a_array = np.arange(5)
    a = Variable(np.arange(5, dtype=float))
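    # sum(np.arange(5)) == 0 + 1 + 2 + 3 + 4 == 10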
    expected_result_variable = Variable(10)
    expected_result_number = 10

    assert np.all(a_array == a.data)
    assert np.all(expected_result_variable == expected_result_number)

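    # The wrapped data must be a distinct object, not an alias of the input array.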
    assert id(a_array) != id(a.data)
    assert id(expected_result_variable) != id(expected_result_number)

    def validate_variable_result(result) -> None:
        assert result.eq(expected_result_variable).all()
        assert isinstance(result, Variable)

    def validate_number_result(result) -> None:
        assert np.all(result == expected_result_number)
        float(result)  # raises if the result can't be converted to a float
        assert isinstance(result, np.number)

    # Variable
    validate_variable_result(a.sum())
    validate_variable_result(np.sum(a))

    # numpy
    validate_number_result(a_array.sum())
    validate_number_result(np.sum(a_array))

    # Verify Derivative
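    # d(sum(a))/da_i == 1 for every element, so the gradient w.r.t. a should be all ones.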
    sgd = autograd.optimizer.SGD(learning_rate=1e-3)
    summation = a.sum()
    variable_to_gradient = sgd.take_training_step(summation)
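    # take_training_step performs one update and returns a Variable -> gradient mapping.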
    assert all(
        isinstance(var, Variable) and isinstance(grad, np.ndarray)
        for var, grad in variable_to_gradient.items())
    assert np.all(variable_to_gradient[a] == np.ones(a.shape))

    # Verify Trainability
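    # Train x so that x.sum() approaches the target y = 10 by minimizing squared error.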
    x = Variable(np.random.rand(2))
    y = 10
    sgd = autograd.optimizer.SGD(learning_rate=1e-1)
    for training_step_index in range(1_000):
        y_hat = x.sum()
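        # np.subtract mixes a plain number with a Variable, exercising NumPy interop.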
        diff = np.subtract(y, y_hat)
        loss = diff**2
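        # Stop early once the loss has converged.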
        if training_step_index > 10 and loss.sum() < 1e-10:
            break
        sgd.take_training_step(loss)
    assert np.abs(x.sum() - 10) < 1e-4
    assert loss.sum() < 1e-10


def test_variable_abs():
    a_array = np.array([0, -1, -2, 3])
    a = Variable(np.array([0, -1, -2, 3], dtype=float))
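    # abs([0, -1, -2, 3]) == [0, 1, 2, 3] == np.arange(4)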
    expected_result_variable = Variable(np.arange(4))
    expected_result_array = np.arange(4)

    assert np.all(a_array == a.data)
    assert np.all(expected_result_variable == expected_result_array)

    assert id(a_array) != id(a.data)
    assert id(expected_result_variable) != id(expected_result_array)