def calculate_error(params, diff, func, grad):
    """Relative error between numerical and analytical derivatives.

    For each finite-difference method ("center", "forward", "backward") the
    numerical derivative of ``func`` at ``params`` is compared with the
    analytical derivative from ``grad``. The difference is divided by the
    absolute analytical derivative, clipped below at 1e-8 to avoid division
    by (near-)zero.

    Args:
        params (np.ndarray): 1d array of function arguments.
        diff: difficulty parameter, controls wiggliness of the function;
            forwarded to ``func`` via ``func_kwargs`` and to ``grad``.
        func (callable): function for which derivatives are calculated.
        grad (callable): analytical gradient of ``func``; called as
            ``grad(params, diff)``.

    Returns:
        dict: maps each method name to an array of relative errors.

    """
    methods = ["center", "forward", "backward"]
    # The analytical derivative and the safe denominator do not depend on
    # the method, so compute them once outside the loop.
    analytical_der = grad(params, diff)
    denominator = np.abs(analytical_der).clip(1e-8, np.inf)
    error = {}
    for m in methods:
        num_der = first_derivative(
            func,
            params,
            func_kwargs={"diff": diff},
            method=m,
        )
        error[m] = (num_der - analytical_der) / denominator

    return error
# Esempio n. 2
# 0
def test_first_derivative_scalar(method):
    """The derivative of x**2 at x=3.0 is 6.0 with default settings."""

    def square(x):
        return x**2

    result = first_derivative(square, 3.0)
    aaae(result, 6.0)
# Esempio n. 3
# 0
def test_first_derivative_jacobian_richardson(
        example_function_jacobian_fixtures):
    """Richardson-style result agrees with numdifftools and the true gradient."""
    func = example_function_jacobian_fixtures["func"]
    func_prime = example_function_jacobian_fixtures["func_prime"]

    point = np.ones(3)
    expected_grad = func_prime(point)
    reference_grad = Jacobian(func, order=2, n=3, method="central")(point)
    calculated_grad = first_derivative(func, point, n_steps=3, method="central")

    aaae(reference_grad, calculated_grad)
    aaae(expected_grad, calculated_grad)
# Esempio n. 4
# 0
def test_first_derivative_gradient(binary_choice_inputs, method):
    """Numerical gradient of the logit loglikelihood matches the analytical one."""
    y = binary_choice_inputs["y"]
    x_data = binary_choice_inputs["x"]
    params = binary_choice_inputs["params_np"]
    loglike = partial(logit_loglike, y=y, x=x_data)

    calculated = first_derivative(
        func=loglike,
        method=method,
        x=params,
        n_steps=1,
        f0=loglike(params),
        n_cores=1,
    )

    expected = logit_loglike_gradient(params, y, x_data)

    aaae(calculated, expected, decimal=4)
# Esempio n. 5
# 0
def test_first_derivative_jacobian(binary_choice_inputs, method):
    """Explicitly spelling out the step options reproduces the Jacobian."""
    y = binary_choice_inputs["y"]
    x_data = binary_choice_inputs["x"]
    params = binary_choice_inputs["params_np"]
    loglikeobs = partial(logit_loglikeobs, y=y, x=x_data)

    # Unbounded box constraints: every entry may move freely in both directions.
    no_lower = np.full(params.shape, -np.inf)
    no_upper = np.full(params.shape, np.inf)

    calculated = first_derivative(
        func=loglikeobs,
        method=method,
        x=params,
        n_steps=1,
        base_steps=None,
        lower_bounds=no_lower,
        upper_bounds=no_upper,
        min_steps=1e-8,
        step_ratio=2.0,
        f0=loglikeobs(params),
        n_cores=1,
    )

    expected = logit_loglikeobs_jacobian(params, y, x_data)

    aaae(calculated, expected, decimal=6)
# Esempio n. 6
# 0
def test_first_derivative_jacobian_works_at_defaults(binary_choice_inputs):
    """Calling first_derivative with only required arguments yields the Jacobian."""
    y, x_data = binary_choice_inputs["y"], binary_choice_inputs["x"]
    params = binary_choice_inputs["params_np"]
    loglikeobs = partial(logit_loglikeobs, y=y, x=x_data)

    result = first_derivative(func=loglikeobs, x=params)
    aaae(result, logit_loglikeobs_jacobian(params, y, x_data), decimal=6)