Example #1
import numpy as np
from george import GP, kernels
from george.solvers import HODLRSolver


def test_apply_inverse(solver, seed=1234, N=201, yerr=0.1):
    """Check GP.apply_inverse against a dense solve of K + yerr^2 * I."""
    np.random.seed(seed)

    # Set up the solver.
    kernel = 1.0 * kernels.ExpSquaredKernel(0.5)
    kwargs = dict()
    if solver == HODLRSolver:
        kwargs["tol"] = 1e-10
    gp = GP(kernel, solver=solver, **kwargs)

    # Sample some data and factorize the noisy covariance matrix.
    x = np.sort(np.random.rand(N))
    y = gp.sample(x)
    gp.compute(x, yerr=yerr)

    # Build the same noisy covariance matrix densely.
    K = gp.get_matrix(x)
    K[np.diag_indices_from(K)] += yerr**2

    # The solver's result should match the dense solve for a vector...
    b1 = np.linalg.solve(K, y)
    b2 = gp.apply_inverse(y)
    assert np.allclose(b1, b2)

    # ...and for a matrix of right-hand sides.
    y = gp.sample(x, size=5).T
    b1 = np.linalg.solve(K, y)
    b2 = gp.apply_inverse(y)
    assert np.allclose(b1, b2)
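
In the original suite the solver argument is supplied by the test runner; a minimal hand-rolled driver, assuming the standard george.solvers exports, might look like this:

from george.solvers import BasicSolver, HODLRSolver

# Run the check with both of george's solvers (BasicSolver is the default).
for solver in (BasicSolver, HODLRSolver):
    test_apply_inverse(solver)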
Example #2
import numpy as np
from george import GP, kernels


def _general_metric(metric, N=100, ndim=3):
    """Check an ExpSquaredKernel with a general metric matrix element-wise."""
    kernel = 0.1 * kernels.ExpSquaredKernel(metric, ndim=ndim)

    x = np.random.rand(N, ndim)
    M0 = kernel.get_value(x)

    # The GP should build exactly the same matrix as the kernel.
    gp = GP(kernel)
    M1 = gp.get_matrix(x)
    assert np.allclose(M0, M1)

    # Compute the expected matrix: 0.1 * exp(-0.5 * r^T metric^{-1} r).
    M2 = np.empty((N, N))
    for i in range(N):
        for j in range(N):
            r = x[i] - x[j]
            r2 = np.dot(r, np.linalg.solve(metric, r))
            M2[i, j] = 0.1 * np.exp(-0.5 * r2)

    # On failure, dump diagnostics before asserting.
    if not np.allclose(M0, M2):
        print(M0)
        print()
        print(M2)
        print()
        print(M0 - M2)
        print()
        print(M0 / M2)

        L = np.linalg.cholesky(metric)
        i, j = N - 1, N - 2
        r = x[j] - x[i]
        print(x[i], x[j])
        print("r = ", r)
        print("L.r = ", np.dot(L, r))
    assert np.allclose(M0, M2)
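
The leading underscore marks this as a helper meant to be called with concrete metrics; a hypothetical invocation (the metrics below are my own, not from the suite) could exercise both a diagonal and a dense symmetric positive-definite matrix:

import numpy as np

np.random.seed(1234)

# An axis-aligned (diagonal) metric...
_general_metric(np.diag([1.0, 2.0, 3.0]))

# ...and a dense metric, symmetric positive definite by construction.
A = np.random.randn(3, 3)
_general_metric(np.dot(A, A.T) + 3.0 * np.eye(3))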
Example #3
import numpy as np
from george import GP, kernels
from george.solvers import HODLRSolver


def test_prediction(solver, seed=42):
    """Basic sanity checks for GP regression."""
    np.random.seed(seed)

    kernel = kernels.ExpSquaredKernel(1.0)
    kwargs = dict()
    if solver == HODLRSolver:
        kwargs["tol"] = 1e-8
    # white_noise is a log-variance, so 0.0 means unit noise variance.
    gp = GP(kernel, solver=solver, white_noise=0.0, **kwargs)

    x0 = np.linspace(-10, 10, 500)
    x = np.sort(np.random.uniform(-10, 10, 300))
    gp.compute(x)

    y = np.sin(x)
    mu, cov = gp.predict(y, x0)

    # Compare against the textbook predictive mean K_* (K + I)^{-1} y,
    # where the +1.0 on the diagonal matches white_noise=0.0 above.
    Kstar = gp.get_matrix(x0, x)
    K = gp.get_matrix(x)
    K[np.diag_indices_from(K)] += 1.0
    mu0 = np.dot(Kstar, np.linalg.solve(K, y))
    print(np.abs(mu - mu0).max())
    assert np.allclose(mu, mu0)
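
The test exercises only the predictive mean; a companion sketch for the predictive covariance could use the corresponding GP identity K_** - K_* (K + I)^{-1} K_*^T. The function name and tolerance below are my own, not part of the suite, and the tolerance may need loosening for approximate solvers:

def check_predictive_covariance(solver, seed=42):
    # Sketch only: mirrors test_prediction, but compares the predicted
    # covariance matrix against the dense GP identity.
    np.random.seed(seed)
    gp = GP(kernels.ExpSquaredKernel(1.0), solver=solver, white_noise=0.0)
    x0 = np.linspace(-10, 10, 50)
    x = np.sort(np.random.uniform(-10, 10, 30))
    gp.compute(x)

    mu, cov = gp.predict(np.sin(x), x0)

    Kstar = gp.get_matrix(x0, x)
    K = gp.get_matrix(x)
    K[np.diag_indices_from(K)] += 1.0  # unit white-noise variance
    cov0 = gp.get_matrix(x0) - np.dot(Kstar, np.linalg.solve(K, Kstar.T))
    assert np.allclose(cov, cov0, atol=1e-6)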
Example #4
import numpy as np
from george import GP, kernels


def test_axis_aligned_metric(seed=1234, N=100, ndim=3):
    """Check an ExpSquaredKernel with an axis-aligned (diagonal) metric."""
    np.random.seed(seed)

    kernel = 0.1 * kernels.ExpSquaredKernel(np.ones(ndim), ndim=ndim)

    x = np.random.rand(N, ndim)
    M0 = kernel.get_value(x)

    # The GP should build exactly the same matrix as the kernel.
    gp = GP(kernel)
    M1 = gp.get_matrix(x)
    assert np.allclose(M0, M1)

    # Compute the expected matrix by broadcasting the pairwise
    # squared Euclidean distances.
    M2 = 0.1 * np.exp(-0.5 * np.sum(
        (x[None, :, :] - x[:, None, :])**2, axis=-1))
    assert np.allclose(M0, M2)
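
The broadcast expression builds the full N x N x ndim difference array in memory; an equivalent, lighter cross-check, assuming SciPy is available (the original suite does not use it), is:

import numpy as np
from scipy.spatial.distance import cdist

x = np.random.rand(100, 3)
M_broadcast = 0.1 * np.exp(
    -0.5 * np.sum((x[None, :, :] - x[:, None, :])**2, axis=-1))
M_cdist = 0.1 * np.exp(-0.5 * cdist(x, x, "sqeuclidean"))
assert np.allclose(M_broadcast, M_cdist)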