Code Example #1
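
(Note on imports: these snippets appear to be extracted from the test suite of the fdm package and omit their import statements. A minimal preamble for most of them would look something like the block below; central_fdm and the multivariate helpers gradient, jacobian, jvp and hvp appear to come from the fdm package itself, while approx, allclose and _TrackEvals look like utilities local to the test suite.)

import numpy as np

from fdm import central_fdm, gradient, jacobian, jvp, hvp
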
def test_range_max(factor):
    f = _TrackEvals(np.sin)

    # Determine the range.
    central_fdm(8, 1, adapt=2)(f, 1)
    true_range = f.true_range(1)

    # Limit the range.
    f.evals_x.clear()
    central_fdm(8, 1, adapt=2)(f, 1, max_range=true_range / factor)
    approx(f.true_range(1), true_range / factor)
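
(In this test, factor is presumably a pytest parameter, and _TrackEvals looks like a wrapper that records every point at which np.sin is evaluated. The test first measures how widely the default scheme spreads its evaluation points around x = 1, then reruns with max_range set to a fraction of that spread and checks that the observed range shrinks to the requested value.)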
Code Example #2
def test_hvp():
    m_jac = central_fdm(10, 1, adapt=1)
    m_dir = central_fdm(10, 1, adapt=0)
    a = np.random.randn(3, 3)

    def f(x):
        return 0.5 * np.matmul(x, np.matmul(a, x))

    x = np.random.randn(3)
    v = np.random.randn(3)
    allclose(
        hvp(f, v, jac_method=m_jac, dir_method=m_dir)(x),
        np.matmul(0.5 * (a + a.T), v)[None, :])
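
(The expected value follows from a one-line calculation: f(x) = 0.5 * x^T A x has gradient 0.5 * (A + A^T) x, so its Hessian is the constant matrix 0.5 * (A + A^T) and the Hessian-vector product along v is 0.5 * (A + A^T) v. The trailing [None, :] only adds the leading axis that hvp apparently returns.)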
Code Example #3
def test_step_limiting():
    m = central_fdm(2, 1)
    f = lambda x: np.exp(1e-3 * x)
    x = 0

    step_max = m.estimate().step * 1000
    assert m.estimate(f, x).step == step_max
Code Example #4
def test_jacobian():
    m = central_fdm(10, 1)
    a = np.random.randn(3, 3)

    def f(x):
        return np.matmul(a, x)

    x = np.random.randn(3)
    allclose(jacobian(f, m)(x), a)
Code Example #5
def test_order_monotonicity():
    err_ref = 1e-4

    for i in range(3, 8):
        err = np.abs(central_fdm(i, 2, condition=1)(np.sin, 1) + np.sin(1))

        # Check that it did better than the previous estimator.
        assert err <= err_ref

        err_ref = err
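
(Since the second derivative of sin is -sin, the quantity |estimate + sin(1)| is the absolute error of the second-derivative estimate at x = 1; the loop checks that this error never increases as the order grows from 3 to 7, starting from an initial tolerance of 1e-4.)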
Code Example #6
def test_jvp_directional():
    m = central_fdm(10, 1)
    a = np.random.randn(3)

    def f(x):
        return np.sum(a * x)

    x = np.random.randn(3)
    v = np.random.randn(3)
    allclose(np.sum(gradient(f, m)(x) * v), jvp(f, v, m)(x))
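
(For f(x) = sum_i a_i x_i the gradient is simply a, and the Jacobian-vector product of a scalar-valued function along v is the directional derivative grad f(x) · v = a · v, so both sides of the allclose call compute the same number.)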
Code Example #7
def test_jvp():
    m = central_fdm(10, 1)
    a = np.random.randn(3, 3)

    def f(x):
        return np.matmul(a, x)

    x = np.random.randn(3)
    v = np.random.randn(3)
    allclose(jvp(f, v, m)(x), np.matmul(a, v))
Code Example #8
def test_zero_bound_zero_error_not_fixed():
    m = central_fdm(2, 1)
    f = lambda _: 0
    x = 0

    assert m.bound_estimator(f, x) == 0
    assert m.bound_estimator(f, x, magnitude=True)[0] == 0
    assert m.bound_estimator(f, x, magnitude=True)[1] == 0

    approx(m(f, x), 0)
Code Example #9
def test_zero_bound_fixed():
    m = central_fdm(2, 1)
    f = np.sin
    x = 0

    assert m.bound_estimator(f, x) == 0
    assert m.bound_estimator(f, x, magnitude=True)[0] > 0
    assert m.bound_estimator(f, x, magnitude=True)[1] > 0

    approx(m(f, x), 1)
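
(Presumably because sin vanishes at the expansion point x = 0, the plain bound estimate is zero, while the magnitude=True variant returns strictly positive estimates; the derivative itself is still recovered correctly, since d/dx sin(x) at 0 is cos(0) = 1, which is what the final approx checks.)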
Code Example #10
def test_order_monotonicity():
    err_ref = 1e-4

    for i in range(3, 10):
        method = central_fdm(i, 2, adapt=1)

        # Check order of method.
        assert method.order == i

        err = np.abs(method(np.sin, 1) + np.sin(1))

        # Check that it did better than the previous estimator.
        assert err <= err_ref

        err_ref = err
Code Example #11
def test_gradient_vector_argument():
    m = central_fdm(10, 1)

    for a, x in zip(
        [np.random.randn(),
         np.random.randn(3),
         np.random.randn(3, 3)],
        [np.random.randn(),
         np.random.randn(3),
         np.random.randn(3, 3)]):

        def f(y):
            return np.sum(a * y * y)

        allclose(2 * a * x, gradient(f, m)(x))
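
(The same function is reused for scalar, vector and matrix arguments: f(y) = sum(a * y * y) has elementwise gradient 2 * a * y, which is why the expected value 2 * a * x has the same shape as x in every case.)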
Code Example #12
def test_estimation():
    m = central_fdm(2, 1)

    assert isinstance(m.coefs, np.ndarray)
    assert isinstance(m.df_magnitude_mult, float)
    assert isinstance(m.f_error_mult, float)
    assert m.step is None
    assert m.acc is None

    m.estimate()

    assert isinstance(m.step, float)
    assert isinstance(m.acc, float)

    m(np.sin, 0, step=1e-3)

    assert isinstance(m.step, float)
    assert m.acc is None
Code Example #13
    def diff_approx(self, deriv=1, order=6):
        """Approximate the derivative of the GP by constructing a finite
        difference approximation.

        Args:
            deriv (int, optional): Order of the derivative. Defaults to `1`.
            order (int, optional): Order of the estimate. Defaults to `6`.

        Returns:
            :class:`.measure.GP`: Approximation of the derivative of the GP.
        """
        # Use the FDM library to figure out the coefficients.
        fdm = central_fdm(order, deriv, adapt=0, factor=1e8)
        fdm.estimate()  # Estimate step size.

        # Construct finite difference.
        df = 0
        for g, c in zip(fdm.grid, fdm.coefs):
            df += c * self.shift(-g * fdm.step)
        return df / fdm.step**deriv
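
(The loop implements the standard finite-difference rule: with grid offsets g, coefficients c and step h = fdm.step, the derivative of order deriv is approximated by h**(-deriv) * sum_i c_i * f(x + g_i * h). Each evaluation f(x + g_i * h) is realised here by shifting the GP, assuming shift(c) maps a process f to x -> f(x - c), so the result is again a GP rather than a number.)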
Code Example #14
def test_estimation():
    m = central_fdm(2, 1)

    assert m.eps is None
    assert m.bound is None
    assert m.step is None
    assert m.acc is None

    m.estimate()

    assert isinstance(m.eps, float)
    assert isinstance(m.bound, float)
    assert isinstance(m.step, float)
    assert isinstance(m.acc, float)

    m(np.sin, 0, step=1e-3)

    assert m.eps is None
    assert m.bound is None
    assert isinstance(m.step, float)
    assert m.acc is None
Code Example #15
    def diff_approx(self, deriv=1, order=5, eps=1e-8, bound=1.):
        """Approximate the derivative of the GP by constructing a finite
        difference approximation.

        Args:
            deriv (int, optional): Order of the derivative. Defaults to `1`.
            order (int, optional): Order of the estimate. Defaults to `5`.
            eps (float, optional): Absolute round-off error of the function
                evaluation. This is used to estimate the step size. Defaults
                to `1e-8`.
            bound (float, optional): Upper bound of the absolute value of the
                function and all its derivatives. This is used to estimate
                the step size. Defaults to `1.`.

        Returns:
            Approximation of the derivative of the GP.
        """
        # Use the FDM library to figure out the coefficients.
        fdm = central_fdm(order, deriv, eps=eps, bound=bound)

        # Construct finite difference.
        df = 0
        for g, c in zip(fdm.grid, fdm.coefs):
            df += c * self.shift(-g * fdm.step)
        return df / fdm.step**deriv
Code Example #16
def test_tiny():
    # Check that `tiny` added in :meth:`.fdm.FDM.estimate` stabilises the
    # numerics.
    assert central_fdm(2, 1, adapt=0)(lambda x: 0.0, 1.0) == 0.0
    assert central_fdm(2, 1, adapt=1, step_max=np.inf)(lambda x: x, 1.0) == 1.0
Code Example #17
def test_condition():
    assert (central_fdm(2, 1, condition=1).estimate().step == 2 *
            central_fdm(2, 1, condition=4).estimate().step)
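
(The factor of 2 asserted here is consistent with the step of this second-order first-derivative estimator scaling like condition**(-1/2): quadrupling condition halves the estimated step.)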
Code Example #18
def test_factor():
    assert (central_fdm(2, 1, factor=4).estimate().step == 2 *
            central_fdm(2, 1, factor=1).estimate().step)
Code Example #19
def test_factor():
    assert (central_fdm(3, 1, factor=5).estimate().eps ==
            5 * central_fdm(3, 1, factor=1).estimate().eps)
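
(Together with Code Example #18 this shows how factor enters the estimates: eps scales linearly with factor, and the doubling of the step between factor=1 and factor=4 in Code Example #18 is consistent with the step growing like the square root of factor.)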
Code Example #20
def test_step_max():
    assert central_fdm(20, 1, step_max=np.inf).estimate().step > 0.1
    assert central_fdm(20, 1, step_max=0.1).estimate().step == 0.1
Code Example #21
def test_default_step():
    approx(
        central_fdm(2, 1).estimate().step,
        np.sqrt(2 * np.finfo(np.float64).eps / 10))