Example #1
def test_add() -> None:
    # Generate a few random points to use as test values.
    np.random.seed(6178)
    x_ = -1 + 2.0 * np.random.rand(100)

    # A random number to use as an arbitrary additive constant.
    # alpha = -0.194751428283640 + 0.079814485412665j

    # Check operation in the face of empty arguments.

    f1 = Function()
    g1 = Function(lambda x: x)
    assert len(f1 + f1) == 0
    assert len(f1 + g1) == 0
    assert len(g1 + f1) == 0

    # Check addition of two function objects.

    def f_op2(x):
        return np.zeros(len(x))

    f2 = Function(f_op2)
    assert check_add_function_to_function(f2, f_op2, f2, f_op2, x_)

    def f_op3(x):
        return np.exp(x) - 1.0

    f3 = Function(f_op3)

    def g_op3(x):
        return 1.0 / (1.0 + x**2)

    g3 = Function(g_op3)
    assert check_add_function_to_function(f3, f_op3, g3, g_op3, x_)

    def g_op4(x):
        return np.cos(1e4 * x)

    g4 = Function(g_op4)
    assert check_add_function_to_function(f3, f_op3, g4, g_op4, x_)

    def g_op5(t):
        return np.sinh(t * np.exp(2.0 * np.pi * 1.0j / 6.0))

    g5 = Function(g_op5)
    assert check_add_function_to_function(f3, f_op3, g5, g_op5, x_)

    # Check that direct construction and the binary plus op give comparable results.
    tol = 10 * np.spacing(1)
    f = Function(lambda x: x)
    g = Function(lambda x: np.cos(x) - 1.0)
    h1 = f + g
    h2 = Function(lambda x: x + np.cos(x) - 1.0)

    # TODO: Improve the constructor so that the following passes:
    assert np.linalg.norm(h1.coefficients[0] - h2.coefficients[0],
                          np.inf) < tol
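
# check_add_function_to_function is not shown in this snippet; below is a
# minimal sketch of what such a helper presumably verifies (the vscale-based
# tolerance is an assumption):
def check_add_function_to_function(f, f_op, g, g_op, x):
    # Compare function-object addition against pointwise addition of the ops.
    h = f + g
    h_exact = f_op(x) + g_op(x)
    tol = 10 * np.max(h.vscale() * np.spacing(1))
    return np.linalg.norm(h(x) - h_exact, np.inf) < tol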
Example #2
def test_sub() -> None:

    # Generate a few random points to use as test values.
    x_ = -1.0 + 2.0 * np.random.rand(100)

    # A random number to use as an arbitrary additive constant.
    # alpha = np.random.randn() + 1.0j * np.random.randn()

    # Check operation in the face of empty arguments.

    f = Function()
    g = Function(lambda x: x)
    assert len(f - f) == 0
    assert len(f - g) == 0
    assert len(g - f) == 0

    # Check subtraction of two function objects.

    def f_op1(x):
        return np.zeros(len(x))

    f1 = Function(f_op1)
    assert check_sub_function_and_function(f1, f_op1, f1, f_op1, x_)

    def f_op2(x):
        return np.exp(x) - 1

    f2 = Function(f_op2)

    def g_op1(x):
        return 1.0 / (1 + x**2)

    g1 = Function(g_op1)
    assert check_sub_function_and_function(f2, f_op2, g1, g_op1, x_)

    def g_op2(x):
        return np.cos(1e4 * x)

    g2 = Function(g_op2)
    assert check_sub_function_and_function(f2, f_op2, g2, g_op2, x_)

    def g_op3(t):
        return np.sinh(t * np.exp(2.0 * np.pi * 1.0j / 6.0))

    g3 = Function(g_op3)
    assert check_sub_function_and_function(f2, f_op2, g3, g_op3, x_)

    # Check that direct construction and the binary minus op give comparable results.

    tol = 10.0 * np.spacing(1)
    f = Function(lambda x: x)
    g = Function(lambda x: np.cos(x) - 1)
    h1 = f - g
    h2 = Function(lambda x: x - (np.cos(x) - 1))
    h3 = h1 - h2
    assert np.linalg.norm(h3.coefficients[0], np.inf) < tol
Example #3
def test_abs() -> None:
    x_ = -1 + 2.0 * np.random.rand(100)

    # Test a positive function:

    def F1(x):  # noqa
        return np.sin(x) + 2.0

    f = Function(lambda x: F1(x))
    h = f.abs()
    assert np.linalg.norm(h(x_) - f(x_), np.inf) < 10 * np.spacing(1)

    # Test a negative function:
    f2 = Function(lambda x: -F1(x))
    h = f2.abs()
    assert np.linalg.norm(h(x_) + f2(x_), np.inf) < 10 * np.spacing(1)

    # Test a complex-valued function:

    def F2(x):  # noqa
        return np.exp(1.0j * np.pi * x)

    f = Function(lambda x: F2(x))
    h = f.abs()
    assert np.linalg.norm(h(x_) - 1.0, np.inf) < 1e2 * np.spacing(1)
Example #4
def test_addition() -> None:
    """Test that it can sum a list of fractions."""
    f1 = Function(lambda x: x)
    g1 = Function(lambda x: -x)
    h1 = f1 + g1
    assert h1.iszero()
    f2 = Function(lambda x: np.exp(-10 * x**2))
    g2 = Function(lambda x: np.sin(2.7181828 * np.pi * x))
    h2 = f2 + g2
    xx = np.linspace(-1, 1, 201 + np.random.randint(0, 10))
    error = f2(xx) + g2(xx) - h2(xx)
    assert np.linalg.norm(error, np.inf) < 100 * 1e-16
Example #5
def test_construction_discrete_data() -> None:
    """Test Constuction with discrete x and y data."""
    x = np.linspace(0, 10, 101)
    y = np.linspace(0, 10, 101)
    func = Function(xdata=x, ydata=y)
    assert np.allclose(func.domain, np.array([0.0, 10.0]), atol=1e-15)
    assert len(func) == 2

    x = np.random.rand(10)
    y = np.random.rand(10)
    func = Function(xdata=x, ydata=y)
    error = func(x) - y
    assert np.linalg.norm(error, np.inf) < 100 * 1.0e-6
Example #6
def test_rmul() -> None:
    """Test the multiplication of f, specified by f_op, by a scalar alpha Generate a few random points to use as test
    values.

    :return:
    """
    x_ = -1 + 2.0 * np.random.rand(100)

    # A random number to use as an arbitrary multiplicative constant.
    alpha = -0.213251928283644 + 0.053493485412265j

    # Check multiplication by scalars.
    def f_op(x):
        return np.sin(x)

    f = Function(f_op)
    g1 = f * alpha
    g2 = alpha * f
    assert g1 == g2

    def g_exact(x):
        return f_op(x) * alpha

    assert np.linalg.norm(g1(x_) - g_exact(x_),
                          np.inf) < 10 * np.max(g1.vscale() * np.spacing(1))
Example #7
def test_radd() -> None:
    # Generate a few random points to use as test values.
    np.random.seed(6178)
    x_ = -1 + 2.0 * np.random.rand(100)

    # A random number to use as an arbitrary additive constant.
    alpha = -0.184752428910640 + 0.079812805462665j

    # Check addition with scalars.

    def f_op(x):
        return np.sin(x)

    f = Function(f_op)

    # Test the addition of f, specified by f_op, to a scalar using
    # a grid of points in [-1, 1] as test samples.
    g1 = f + alpha
    g2 = alpha + f
    assert g1 == g2

    def g_exact(x):
        return f_op(x) + alpha

    tol = 10 * g1.vscale() * np.spacing(1)
    assert np.linalg.norm(g1(x_) - g_exact(x_), np.inf) <= tol
Example #8
def test_construction_adaptive() -> None:
    """Test the construction process."""

    f = Function()
    assert len(f) == 0
    assert f is not None
    assert isinstance(f, Function)

    # Set the tolerance:
    tol = 100 * np.spacing(1)

    # Test on a scalar-valued function:
    def sin(t):
        return np.sin(t)

    g = Function(sin)
    values = g.coefficients_to_values()
    assert np.linalg.norm(sin(g.points) - values, np.inf) < tol
    assert np.abs(g.vscale() - np.sin(1.0)) < tol
    assert g.resolved

    coeffs = np.array([0.0, 1.0])
    result = Function(coefficients=coeffs)
    assert np.all(result.coefficients == coeffs)

    result = Function(fun=lambda x: x)
    assert np.all(result.coefficients == np.array([0.0, 1.0]))

    xx = np.linspace(-1, 1, 201 + np.random.randint(0, 10))

    def f_true(x):
        return np.exp(-10 * x**2)

    f = Function(fun=f_true)
    assert np.linalg.norm(f(xx) - f_true(xx), np.inf) < 100 * 1e-16

    xx = np.linspace(-1, 1, 201 + np.random.randint(0, 10))

    def g_true(x):
        return np.sin(4 * np.pi * x)

    g = Function(g_true)
    assert np.linalg.norm(g(xx) - g_true(xx), np.inf) < 100 * 1e-16
Example #9
def test_construction_fixed_length() -> None:
    """Test construction when length is given
    :return:
    """
    def fun(x):
        return x

    func = Function(fun, length=0)
    assert len(func) == 0

    func = Function(lambda x: x, length=2)
    assert len(func) == 2

    func = Function(lambda x: np.abs(x), lengths=[2, 2], domain=[-1, 0, 1])
    assert len(func.pieces) == 2
    assert len(func.pieces[0]) == 2
    assert len(func.pieces[1]) == 2

    func = Function(fun, length=201)
    assert len(func) == 201
    xx = np.linspace(-1, 1, 201 + np.random.randint(0, 10))
    assert np.allclose(func(xx), xx, atol=1e-15)
    assert np.allclose(func.points, func.values, atol=1e-15)
Example #10
def spotcheck_min(fun_op: Callable, exact_min: complex) -> bool:
    """Spot-check the min results for a given function."""
    f = Function(fun_op)
    y = f.min()
    x = f.argmin()
    fx = fun_op(x)
    result = (np.abs(y - exact_min) < 1.0e2 * f.vscale() * np.spacing(1)) and \
             (np.abs(fx - exact_min) < 1.0e2 * f.vscale() * np.spacing(1))

    return result
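
# Usage sketch (assumes the default Function domain is [-1, 1]): np.sin(10*x)
# attains its exact minimum of -1.0 at x = -pi/20, inside the interval.
assert spotcheck_min(lambda x: np.sin(10 * x), -1.0)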
Example #11
def polyfit_global(
        x: np.ndarray,
        y: np.ndarray,
        degree: int = 1,
        domain: Union[None, Sequence[float], np.ndarray] = None) -> Function:
    """Degree n least squares polynomial approximation of data y taken on points x in a domain.

    :param x: x-values, np array
    :param y: y-values, i.e., data values, np array
    :param degree: degree of approximation, an integer
    :param domain: domain of approximation
    :return: a Function representing the degree-n least squares fit
    """

    if domain is None:
        domain = [np.min(x), np.max(x)]

    domain = 1.0 * np.array(domain)
    a = domain[0]
    b = domain[-1]
    assert len(x) == len(
        y), f"len(x) = {len(x)}, while len(y) = {len(y)}, these must be equal"
    assert degree == int(
        degree), f'degree = {degree}, degree must be an integer'

    n = int(degree)

    # map points to [-1, 1]
    m = len(x)
    x_normalized = 2.0 * (x - a) / (b - a) - 1.0
    # construct the Chebyshev-Vandermonde matrix:
    Tx = np.zeros((m, n + 1))
    Tx[:, 0] = np.ones(m)
    if n > 0:
        Tx[:, 1] = x_normalized
        for k in range(1, n):
            Tx[:, k + 1] = 2.0 * x_normalized * Tx[:, k] - Tx[:, k - 1]

    # c, residuals, rank, singular_values = polyfit_jit(x, y, n, a, b)
    c, residuals, rank, singular_values = np.linalg.lstsq(Tx, y, rcond=None)

    # Make a function:
    return Function(coefficients=c, domain=domain)
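
# Usage sketch for polyfit_global (the data below is made up for
# illustration): fit a quadratic to noisy samples of a quadratic on [0, 5].
x_data = np.linspace(0.0, 5.0, 50)
y_data = 1.0 + 2.0 * x_data - 0.5 * x_data**2 + 1e-3 * np.random.randn(50)
p = polyfit_global(x_data, y_data, degree=2)
assert np.max(np.abs(p(x_data) - y_data)) < 1e-2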
Example #12
def spotcheck_max(fun_op: Callable, exact_max: complex) -> bool:
    """Spot-check the max results for a given function."""
    f = Function(fun_op)
    y = f.max()
    x = f.argmax()
    fx = fun_op(x)

    # [TODO]: Try to get this tolerance down:
    result = (np.all(
        np.abs(y - exact_max) < 1.0e2 * f.vscale() * np.spacing(1))) and (
            np.all(
                np.abs(fx - exact_max) < 1.0e2 * f.vscale() * np.spacing(1)))

    return result
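
# Usage sketch (assumes the default Function domain is [-1, 1]): np.cos
# attains its exact maximum of 1.0 at x = 0.
assert spotcheck_max(np.cos, 1.0)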
Example #13
def polyfit(
        x: np.ndarray,
        y: np.ndarray,
        degree: Union[int, Sequence[int]] = 1,
        domain: Union[None, np.ndarray, Sequence[float]] = None) -> Function:
    """Least squares polynomial fitting to discrete data with piecewise domain splitting handled.

    :param x:
    :param y:
    :param degree: an array or a double len(degree) = len(domain) - 1
    :param domain: an array to specify where the breakpionts are
    :return:
    """

    if domain is None or len(domain) == 2:
        assert isinstance(degree, int)
        return polyfit_global(x, y, degree, domain)
    if len(domain) > 2:
        n_pieces = len(domain) - 1
        if isinstance(degree, Sequence):  # A list of degrees is passed
            assert n_pieces == len(
                degree), 'must specify degree for each domain'
            degrees = degree
        else:  # The degree passed is just an integer
            degrees = n_pieces * [int(degree)]
    else:
        raise AssertionError(
            f'domain = {domain}, domain must be of the form [a, b, ...] with at least two endpoints')

    all_coefficients = n_pieces * [None]
    for j in range(n_pieces):
        a, b = domain[j], domain[j + 1]
        idx = (a <= x) & (x <= b)
        if np.sum(idx) == 0:
            c = np.array([])
        else:
            xj, yj = x[idx], y[idx]
            f = polyfit_global(xj, yj, degree=degrees[j], domain=[a, b])
            c = f.coefficients[0].copy()
        all_coefficients[j] = c

    return Function(coefficients=all_coefficients, domain=domain)
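
# Usage sketch for the piecewise path: fit |x| on [-1, 1] with two linear
# pieces split at 0; each piece is exactly linear, so the fit should be
# essentially exact.
x_data = np.linspace(-1.0, 1.0, 101)
y_data = np.abs(x_data)
p = polyfit(x_data, y_data, degree=[1, 1], domain=[-1.0, 0.0, 1.0])
assert np.max(np.abs(p(x_data) - y_data)) < 1e-12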
Example #14
def test_rsub() -> None:
    # Generate a few random points to use as test values.
    x_ = -1.0 + 2.0 * np.random.rand(100)

    # A random number to use as an arbitrary additive constant.
    alpha = np.random.randn() + 1.0j * np.random.randn()

    def f_op(x):
        return np.sin(x)

    f = Function(f_op)

    # Test the subtraction of f from a scalar and of a scalar from f using a
    # grid of points.
    g1 = f - alpha
    g2 = alpha - f
    assert g1 == -g2  # (f - alpha) and (alpha - f) differ only in sign

    def g_exact(x):
        return f_op(x) - alpha

    # [TODO] can we bring this tolerance down?
    tol = 1.0e2 * g1.vscale() * np.spacing(1)
    assert np.linalg.norm(g1(x_) - g_exact(x_), np.inf) <= tol
Example #15
def test_roots() -> None:
    def func(x):
        return (x + 1) * 50

    f = Function(lambda x: special.j0(func(x)))
    r = func(f.roots())
    exact = np.array([
        2.40482555769577276862163, 5.52007811028631064959660,
        8.65372791291101221695437, 11.7915344390142816137431,
        14.9309177084877859477626, 18.0710639679109225431479,
        21.2116366298792589590784, 24.3524715307493027370579,
        27.4934791320402547958773, 30.6346064684319751175496,
        33.7758202135735686842385, 36.9170983536640439797695,
        40.0584257646282392947993, 43.1997917131767303575241,
        46.3411883716618140186858, 49.4826098973978171736028,
        52.6240518411149960292513, 55.7655107550199793116835,
        58.9069839260809421328344, 62.0484691902271698828525,
        65.1899648002068604406360, 68.3314693298567982709923,
        71.4729816035937328250631, 74.6145006437018378838205,
        77.7560256303880550377394, 80.8975558711376278637723,
        84.0390907769381901578795, 87.1806298436411536512617,
        90.3221726372104800557177, 93.4637187819447741711905,
        96.6052679509962687781216, 99.7468198586805964702799
    ])

    assert np.linalg.norm(r - exact, np.inf) < 1.0e1 * len(f) * np.spacing(1)

    k = 500
    f = Function(lambda x: np.sin(np.pi * k * x))
    r = f.roots()
    assert np.linalg.norm(r - (1.0 * np.r_[-k:k + 1]) / k,
                          np.inf) < 1e1 * len(f) * np.spacing(1)

    # Test a perturbed polynomial:
    f = Function(lambda x: (x - .1) * (x + .9) * x * (x - .9) + 1e-14 * x**5)
    r = f.roots()
    assert len(r) == 4
    assert np.linalg.norm(f(r), np.inf) < 1e2 * len(f) * np.spacing(1)

    # Test some simple polynomials:
    f = Function(values=[-1.0, 1.0])
    r = f.roots()
    assert np.all(r == 0)

    f = Function(values=[1.0, 0.0, 1.0])
    r = f.roots()
    assert len(r) == 2
    assert np.linalg.norm(r, np.inf) < np.spacing(1)

    # Test some complex roots:
    f = Function(lambda x: 1 + 25 * x**2)
    r = f.roots(complex_roots=True)
    assert len(r) == 2
    assert np.linalg.norm(r - np.r_[-1.0j, 1.0j] / 5.0,
                          np.inf) < 10 * np.spacing(1)

    # [TODO] This is failing:
    # f = Function(lambda x: (1 + 25*x**2)*np.exp(x))
    # r = f.roots(complex_roots=True, prune=True)
    # assert len(r) == 2
    # assert np.linalg.norm(r - np.r_[1.0j, -1.0j]/5.0, np.inf) < 10*len(f)*np.spacing(1)

    # [TODO] We get a different number of roots:
    # f = Function(lambda x: np.sin(100*np.pi*x))
    # r1 = f.roots(complex_roots=True, recurse=False)
    # r2 = f.roots(complex_roots=True)
    # assert len(r1) == 201
    # assert len(r2) == 213

    # Add a test for the 'qz' flag:
    f = Function(lambda x: 1e-10 * x**3 + x**2 - 1e-12)
    r = f.roots(qz=True)
    assert len(r) != 0
    assert np.linalg.norm(f[r], np.inf) < 10 * np.spacing(1)

    # Add a rootfinding test for low degree non-even functions:
    f = Function(lambda x: (x - .5) * (x - 1.0 / 3.0))
    r = f.roots(qz=True)
    assert np.linalg.norm(f[r], np.inf) < np.spacing(1)
Example #16
def test_mul() -> None:
    # Generate a few random points to use as test values.
    np.random.seed(6178)
    x_ = -1 + 2.0 * np.random.rand(100)

    # A random number to use as an arbitrary multiplicative constant.
    alpha = -0.194758928283640 + 0.075474485412665j

    # Check operation in the face of empty arguments.

    f1 = Function()
    g1 = Function(lambda x: x)
    assert len(f1 * f1) == 0
    assert len(f1 * g1) == 0
    assert len(g1 * f1) == 0

    # Check multiplication by constant functions.

    def f_op2(x):
        return np.sin(x)

    f2 = Function(f_op2)

    def g_op2(x):
        return np.zeros(len(x)) + alpha

    g2 = Function(g_op2)
    assert check_mul_function_by_function(f2, f_op2, g2, g_op2, x_, False)

    # Spot-check multiplication of two function objects for a few test
    # functions.

    def f_op3(x):
        return np.ones(len(x))

    f3 = Function(f_op3)
    assert check_mul_function_by_function(f3, f_op3, f3, f_op3, x_, False)

    def f_op4(x):
        return np.exp(x) - 1.0

    f4 = Function(f_op4)

    def g_op3(x):
        return 1.0 / (1.0 + x**2)

    g3 = Function(g_op3)
    assert check_mul_function_by_function(f4, f_op4, g3, g_op3, x_, False)

    # If f and g are real then so must be f * g
    h = f4 * g3
    assert h.isreal()

    def g_op4(x):
        return np.cos(1.0e4 * x)

    g4 = Function(g_op4)
    assert check_mul_function_by_function(f4, f_op4, g4, g_op4, x_, False)

    def g_op5(t):
        return np.sinh(t * np.exp(2.0 * np.pi * 1.0j / 6.0))

    g5 = Function(g_op5)
    assert check_mul_function_by_function(f4, f_op4, g5, g_op5, x_, False)

    # Check specially handled cases, including some in which an adjustment for
    # positivity is performed.

    def f_op5(t):
        return np.sinh(t * np.exp(2.0 * np.pi * 1.0j / 6.0))

    f5 = Function(f_op5)
    assert check_mul_function_by_function(f5, f_op5, f5, f_op5, x_, False)

    def g_op6(t):
        return np.conjugate(np.sinh(t * np.exp(2.0 * np.pi * 1.0j / 6.0)))

    g6 = f5.conj()
    assert check_mul_function_by_function(f5, f_op5, g6, g_op6, x_, True)

    def f_op7(x):
        return np.exp(x) - 1.0

    f7 = Function(f_op7)
    assert check_mul_function_by_function(f7, f_op7, f7, f_op7, x_, True)

    # Check that multiplication and direct construction give similar results.

    tol = 50 * np.spacing(1)

    def g_op7(x):
        return 1.0 / (1.0 + x**2)

    g7 = Function(g_op7)
    h1 = f7 * g7
    h2 = Function(lambda x: f_op7(x) * g_op7(x))
    h2 = h2.prolong(len(h1))
    assert np.linalg.norm(h1.coefficients[0] - h2.coefficients[0],
                          np.inf) < tol
Example #17
def test_poly() -> None:
    f = Function()
    assert len(f.poly()[0]) == 0

    f = Function(lambda x: 1.0 + 0.0 * x)
    assert np.linalg.norm(f.poly()[0] - np.ones(1),
                          np.inf) < f.vscale() * np.spacing(1)
    f = Function(lambda x: 1 + x)
    assert np.linalg.norm(f.poly()[0] - np.ones(2),
                          np.inf) < f.vscale() * np.spacing(1)
    f = Function(lambda x: 1 + x + x**2)
    assert np.linalg.norm(f.poly()[0] - np.ones(3),
                          np.inf) < f.vscale() * np.spacing(1)
    f = Function(lambda x: 1 + x + x**2 + x**3)
    assert np.linalg.norm(f.poly()[0] - np.ones(4),
                          np.inf) < f.vscale() * np.spacing(1)
    f = Function(lambda x: 1 + x + x**2 + x**3 + x**4)
    assert np.linalg.norm(f.poly()[0] - np.ones(5),
                          np.inf) < f.vscale() * np.spacing(1)
Example #18
import matplotlib.pyplot as plt
import numpy as np

from numfun.function import Function

f = Function(lambda x: np.heaviside(x, 0), domain=[-1, 0, 1])

# Define a probability distribution
a = 0
b = 100
domain = [a, b]
x = Function(lambda x: x, domain=domain)
f = Function(lambda x: 2.0 * np.exp(-2.0 * x), domain=domain)
f.plot()

# What is the expected value and the variance:
E = (x * f).definite_integral()
V = (x**2 * f).definite_integral() - E**2
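
# Sanity check: up to the (negligible) truncation at b = 100, f is the density
# of an exponential distribution with rate 2, so E ~ 1/2 and V ~ 1/4.
assert np.abs(E - 0.5) < 1e-8
assert np.abs(V - 0.25) < 1e-8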

#%%
f = Function(lambda x: np.sin(x) + np.sin(5 * x**2), domain=[0, 10])
x = f.roots()
y = f(x)
plt.figure(figsize=(10, 8))
f.plot()
plt.plot(x, y, '.')

#%%
f = Function(fun=[
    lambda x: np.exp(-1.0 / x**2), lambda x: 0 * x,
    lambda x: np.exp(-1. / x**2)
Example #19
def test_cumsum() -> None:
    # Generate a few random points to use as test values.
    x_ = 2 * np.random.rand(100) - 1

    # Spot-check antiderivatives for a couple of functions.  We verify that the
    # function antiderivatives match the true ones up to a constant by checking
    # that the standard deviation of the difference between the two on a large
    # random grid is small. We also check that f.cumsum() evaluates to 0 at
    # x = -1 each time.

    f = Function(lambda x: np.exp(x) - 1.0)
    F = f.cumsum()  # noqa
    F_ex = lambda x: np.exp(x) - x  # noqa
    err = np.std(F[x_] - F_ex(x_))
    tol = 20 * F.vscale() * np.spacing(1)
    assert err < tol
    assert np.abs(F[-1]) < tol

    f = Function(lambda x: 1.0 / (1.0 + x**2))
    F = f.cumsum()  # noqa
    F_ex = lambda x: np.arctan(x)  # noqa
    err = np.std(F[x_] - F_ex(x_))
    tol = 10 * F.vscale() * np.spacing(1)
    assert err < tol
    assert np.abs(F[-1]) < tol

    f = Function(lambda x: np.cos(1.0e4 * x))
    F = f.cumsum()  # noqa
    F_ex = lambda x: np.sin(1.0e4 * x) / 1.0e4  # noqa
    err = F[x_] - F_ex(x_)
    tol = 10.0e4 * F.vscale() * np.spacing(1)
    assert (np.std(err) < tol) and (np.abs(F[-1]) < tol)

    z = np.exp(2 * np.pi * 1.0j / 6)
    f = Function(lambda t: np.sinh(t * z))
    F = f.cumsum()  # noqa
    F_ex = lambda t: np.cosh(t * z) / z  # noqa
    err = F[x_] - F_ex(x_)
    tol = 10 * F.vscale() * np.spacing(1)
    assert (np.std(err) < tol) and (np.abs(F[-1]) < tol)

    # Check that applying cumsum() and direct construction of the antiderivative
    # give the same results (up to a constant).
    f = Function(lambda x: np.sin(4.0 * x)**2)
    F = Function(lambda x: 0.5 * x - 0.0625 * np.sin(8 * x))  # noqa
    G = f.cumsum()  # noqa
    err = G - F
    tol = 10 * G.vscale() * np.spacing(1)
    values = err.coefficients_to_values(err.coefficients)
    assert (np.std(values) < tol) and (np.abs(G[-1]) < tol)

    # Check that f.diff().cumsum() == f and that f.cumsum().diff() == f up to a
    # constant.

    f = Function(lambda x: x * (x - 1.0) * np.sin(x) + 1.0)

    # def integral_f(x):
    #     return (-x ** 2 + x + 2) * np.cos(x) + x + (2 * x - 1) * np.sin(x)
    # F = lambda x: integral_f(x) - integral_f(-1)
    g = f.cumsum().diff()
    err = f(x_) - g(x_)
    tol = 10 * g.vscale() * np.spacing(1)
    assert np.linalg.norm(err, np.inf) < 100 * tol

    h = f.diff().cumsum()
    err = f(x_) - h(x_)
    tol = 10 * h.vscale() * np.spacing(1)
    assert (np.std(err) < tol) and (np.abs(h[-1]) < tol)
Example #20
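# Note: a, b, c (the quadratic coefficients), w (a noise vector), and the
# sample points x are assumed to be defined earlier in the source script, as
# are the imports (matplotlib.pyplot as plt, scipy.interpolate.CubicSpline,
# and the barycentric_interpolation helper); none are shown in this excerpt.
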
def y_true(xx):
    return a + b * xx + c * xx ** 2


def y_noisy(xx):
    return y_true(xx) + w


y = y_noisy(x)


#%% Interpolation in equidistant points doesn't really work:

g = Function(lambda xx: barycentric_interpolation(xx, y, x), length=len(y), domain=[0, 5])
g.plot()
plt.plot(x, y, 'g.', x, y - w, 'k--', x, g(x), 'r.')
plt.grid(True)
plt.show()

#%% So we try cubic splines:
cs = CubicSpline(x, y)
xs = np.arange(0.0, 5, 0.05)
ys = y_true(xs)
plt.figure(figsize=(10, 8))
plt.plot(x, y, 'o', label='obs data')
plt.plot(xs, ys, label='true data')
plt.plot(xs, cs(xs), label="Spline")
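# Close out the figure (legend/show calls assumed to finish the excerpt):
plt.legend()
plt.show()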
Example #21
def test_sum() -> None:
    # Spot-check integrals for a couple of functions.
    f = Function(lambda x: np.exp(x) - 1.0)
    assert np.abs(f.sum() -
                  0.350402387287603) < 10 * f.vscale() * np.spacing(1)

    f = Function(lambda x: 1. / (1 + x**2))
    assert np.abs(f.sum() - np.pi / 2.0) < 10 * f.vscale() * np.spacing(1)

    f = Function(lambda x: np.cos(1e4 * x))
    exact = -6.112287777765043e-05
    assert np.abs(f.sum() -
                  exact) / np.abs(exact) < 1e6 * f.vscale() * np.spacing(1)

    z = np.exp(2 * np.pi * 1.0j / 6.0)
    f = Function(lambda t: np.sinh(t * z))
    assert np.abs(f.sum()) < 10 * f.vscale() * np.spacing(1)

    # Check a few basic properties.
    a = 2.0
    b = -1.0j
    f = Function(lambda x: x * np.sin(x**2) - 1)
    df = f.diff()
    g = Function(lambda x: np.exp(-x**2))
    dg = g.diff()
    fg = f * g
    gdf = g * df
    fdg = f * dg

    tol_f = 10 * f.vscale() * np.spacing(1)
    tol_g = 10 * g.vscale() * np.spacing(1)
    tol_df = 10 * df.vscale() * np.spacing(1)
    tol_dg = 10 * dg.vscale() * np.spacing(1)
    tol_fg = 10 * fg.vscale() * np.spacing(1)
    tol_fdg = 10 * fdg.vscale() * np.spacing(1)
    tol_gdf = 10 * gdf.vscale() * np.spacing(1)

    # Linearity.
    assert np.abs((a * f + b * g).sum() -
                  (a * f.sum() + b * g.sum())) < max(tol_f, tol_g)

    # Integration-by-parts.
    assert np.abs(fdg.sum() - (fg(1) - fg(-1) - gdf.sum())) < np.max(
        np.r_[tol_fdg, tol_gdf, tol_fg])

    # Fundamental Theorem of Calculus.
    assert np.abs(df.sum() - (f(1) - f(-1))) < np.max(np.r_[tol_df, tol_f])
    assert np.abs(dg.sum() - (g(1) - g(-1))) < np.max(np.r_[tol_dg, tol_g])