Example #1
def test_nyquist_singularity(method, seed=4220):
    np.random.seed(seed)

    kernel = terms.ComplexTerm(1.0, np.log(1e-6), np.log(1.0))
    gp = GP(kernel, method=method)

    # Samples are very close to Nyquist with f = 1.0
    ts = np.array([0.0, 0.5, 1.0, 1.5])
    ts[1] = ts[1] + 1e-9 * np.random.randn()
    ts[2] = ts[2] + 1e-8 * np.random.randn()
    ts[3] = ts[3] + 1e-7 * np.random.randn()

    yerr = np.random.uniform(low=0.1, high=0.2, size=len(ts))
    y = np.random.randn(len(ts))

    gp.compute(ts, yerr)
    llgp = gp.log_likelihood(y)

    K = gp.get_matrix(ts)
    K[np.diag_indices_from(K)] += yerr**2.0

    ll = (-0.5 * np.dot(y, np.linalg.solve(K, y)) -
          0.5 * np.linalg.slogdet(K)[1] - 0.5 * len(y) * np.log(2.0 * np.pi))

    assert np.allclose(ll, llgp)
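Most of these snippets are excerpted from celerite's test and benchmark suites, so they omit their module-level preamble. A minimal sketch of the imports they rely on, plus the pytest parametrization that supplies the method argument in several tests (the parametrize values are an assumption; only "sparse" is visible in these excerpts, in Example #7):

import numpy as np
import pytest

import celerite
from celerite import terms
from celerite import GP

# Tests that take a `method` argument are parametrized over the solver
# backends, along the lines of:
# @pytest.mark.parametrize("method", [...])  # e.g. "sparse"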
Example #2
def test_predict(seed=42):
    np.random.seed(seed)
    x = np.linspace(1, 59, 300)
    t = np.sort(np.random.uniform(10, 50, 100))
    yerr = np.random.uniform(0.1, 0.5, len(t))
    y = np.sin(t)

    kernel = terms.RealTerm(0.1, 0.5)
    for term in [(0.6, 0.7, 1.0), (0.1, 0.05, 0.5, -0.1)]:
        kernel += terms.ComplexTerm(*term)
    gp = GP(kernel)

    gp.compute(t, yerr)
    K = gp.get_matrix(include_diagonal=True)
    Ks = gp.get_matrix(x, t)
    true_mu = np.dot(Ks, np.linalg.solve(K, y))
    true_cov = gp.get_matrix(x, x) - np.dot(Ks, np.linalg.solve(K, Ks.T))

    mu, cov = gp.predict(y, x)

    _, var = gp.predict(y, x, return_var=True)
    assert np.allclose(mu, true_mu)
    assert np.allclose(cov, true_cov)
    assert np.allclose(var, np.diag(true_cov))

    mu0, cov0 = gp.predict(y, t)
    mu, cov = gp.predict(y)
    assert np.allclose(mu0, mu)
    assert np.allclose(cov0, cov)
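Example #2 checks gp.predict against the textbook Gaussian-process conditional. With $K$ the noisy training covariance (the kernel matrix plus yerr**2 on the diagonal), $K_*$ the test/train cross covariance, and $K_{**}$ the test covariance, the reference values computed above are

$$\mu_* = K_* K^{-1} y, \qquad \Sigma_* = K_{**} - K_* K^{-1} K_*^{\mathsf{T}},$$

and return_var=True returns only the diagonal of $\Sigma_*$.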
Example #3
def test_log_likelihood(method, seed=42):
    np.random.seed(seed)
    x = np.sort(np.random.rand(10))
    yerr = np.random.uniform(0.1, 0.5, len(x))
    y = np.sin(x)

    kernel = terms.RealTerm(0.1, 0.5)
    gp = GP(kernel, method=method)
    with pytest.raises(RuntimeError):
        gp.log_likelihood(y)

    for term in [(0.6, 0.7, 1.0)]:
        kernel += terms.ComplexTerm(*term)
        gp = GP(kernel, method=method)

        assert gp.computed is False

        with pytest.raises(ValueError):
            gp.compute(np.random.rand(len(x)), yerr)

        gp.compute(x, yerr)
        assert gp.computed is True
        assert gp.dirty is False

        ll = gp.log_likelihood(y)
        K = gp.get_matrix(include_diagonal=True)
        ll0 = -0.5 * np.dot(y, np.linalg.solve(K, y))
        ll0 -= 0.5 * np.linalg.slogdet(K)[1]
        ll0 -= 0.5 * len(x) * np.log(2*np.pi)
        assert np.allclose(ll, ll0)

    # Check that changing the parameters "un-computes" the likelihood.
    gp.set_parameter_vector(gp.get_parameter_vector())
    assert gp.dirty is True
    assert gp.computed is False

    # Check that changing the parameters changes the likelihood.
    gp.compute(x, yerr)
    ll1 = gp.log_likelihood(y)
    params = gp.get_parameter_vector()
    params[0] += 0.1
    gp.set_parameter_vector(params)
    gp.compute(x, yerr)
    ll2 = gp.log_likelihood(y)
    assert not np.allclose(ll1, ll2)

    gp[1] += 0.1
    assert gp.dirty is True
    gp.compute(x, yerr)
    ll3 = gp.log_likelihood(y)
    assert not np.allclose(ll2, ll3)
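The brute-force reference ll0 used here (and again in Examples #15 and #16) is the dense Gaussian log-likelihood

$$\log L = -\tfrac{1}{2}\, y^{\mathsf{T}} K^{-1} y - \tfrac{1}{2} \log\det K - \tfrac{N}{2} \log 2\pi,$$

with $K$ = gp.get_matrix(include_diagonal=True); the assertion checks that the celerite solver reproduces it to numerical precision.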
Example #4
def test_pickle(with_general, seed=42):
    solver = celerite.CholeskySolver()
    np.random.seed(seed)
    t = np.sort(np.random.rand(500))
    diag = np.random.uniform(0.1, 0.5, len(t))
    y = np.sin(t)

    if with_general:
        U = np.vander(t - np.mean(t), 4).T
        V = U * np.random.rand(4)[:, None]
        A = np.sum(U * V, axis=0) + 1e-8
    else:
        A = np.empty(0)
        U = np.empty((0, 0))
        V = np.empty((0, 0))

    alpha_real = np.array([1.3, 1.5])
    beta_real = np.array([0.5, 0.2])
    alpha_complex_real = np.array([1.0])
    alpha_complex_imag = np.array([0.1])
    beta_complex_real = np.array([1.0])
    beta_complex_imag = np.array([1.0])

    def compare(solver1, solver2):
        assert solver1.computed() == solver2.computed()
        if not solver1.computed():
            return
        assert np.allclose(solver1.log_determinant(),
                           solver2.log_determinant())
        assert np.allclose(solver1.dot_solve(y), solver2.dot_solve(y))

    s = pickle.dumps(solver, -1)
    solver2 = pickle.loads(s)
    compare(solver, solver2)

    solver.compute(0.0, alpha_real, beta_real, alpha_complex_real,
                   alpha_complex_imag, beta_complex_real, beta_complex_imag, A,
                   U, V, t, diag)
    solver2 = pickle.loads(pickle.dumps(solver, -1))
    compare(solver, solver2)

    # Test that models can be pickled too.
    kernel = terms.RealTerm(0.5, 0.1)
    kernel += terms.ComplexTerm(0.6, 0.7, 1.0)
    gp1 = GP(kernel)
    gp1.compute(t, diag)
    s = pickle.dumps(gp1, -1)
    gp2 = pickle.loads(s)
    assert np.allclose(gp1.log_likelihood(y), gp2.log_likelihood(y))
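The same round-trip guarantee means a computed GP can be persisted to disk with the standard pickle module; a minimal sketch under the same setup as the example (the file name is arbitrary):

import pickle

import numpy as np
from celerite import terms, GP

t = np.sort(np.random.rand(500))
diag = np.random.uniform(0.1, 0.5, len(t))

gp = GP(terms.RealTerm(0.5, 0.1) + terms.ComplexTerm(0.6, 0.7, 1.0))
gp.compute(t, diag)

with open("gp.pkl", "wb") as f:
    pickle.dump(gp, f, protocol=-1)
with open("gp.pkl", "rb") as f:
    gp2 = pickle.load(f)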
Example #5
def test_predict(method, seed=42):
    np.random.seed(seed)
    x = np.sort(np.random.rand(10))
    yerr = np.random.uniform(0.1, 0.5, len(x))
    y = np.sin(x)

    kernel = terms.RealTerm(0.1, 0.5)
    for term in [(0.6, 0.7, 1.0)]:
        kernel += terms.ComplexTerm(*term)
    gp = GP(kernel, method=method)
    gp.compute(x, yerr)

    mu0, cov0 = gp.predict(y, x)
    mu, cov = gp.predict(y)
    assert np.allclose(mu0, mu)
    assert np.allclose(cov0, cov)
Example #6
def test_build_gp(method, seed=42):
    kernel = terms.RealTerm(0.5, 0.1)
    kernel += terms.ComplexTerm(0.6, 0.7, 1.0)
    gp = GP(kernel, method=method)

    assert gp.vector_size == 5
    p = gp.get_parameter_vector()
    assert np.allclose(p, [0.5, 0.1, 0.6, 0.7, 1.0])

    gp.set_parameter_vector([0.5, 0.8, 0.6, 0.7, 2.0])
    p = gp.get_parameter_vector()
    assert np.allclose(p, [0.5, 0.8, 0.6, 0.7, 2.0])

    with pytest.raises(ValueError):
        gp.set_parameter_vector([0.5, 0.8, -0.6])

    with pytest.raises(ValueError):
        gp.set_parameter_vector("face1")
Example #7
def test_pickle(method, seed=42):
    solver = get_solver(method)
    np.random.seed(seed)
    t = np.sort(np.random.rand(500))
    diag = np.random.uniform(0.1, 0.5, len(t))
    y = np.sin(t)

    alpha_real = np.array([1.3, 1.5])
    beta_real = np.array([0.5, 0.2])
    alpha_complex_real = np.array([1.0])
    alpha_complex_imag = np.array([0.1])
    beta_complex_real = np.array([1.0])
    beta_complex_imag = np.array([1.0])

    def compare(solver1, solver2):
        assert solver1.computed() == solver2.computed()
        if not solver1.computed():
            return
        assert np.allclose(solver1.log_determinant(),
                           solver2.log_determinant())
        assert np.allclose(solver1.dot_solve(y),
                           solver2.dot_solve(y))

    s = pickle.dumps(solver, -1)
    solver2 = pickle.loads(s)
    compare(solver, solver2)

    if method != "sparse":
        solver.compute(
            alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
            beta_complex_real, beta_complex_imag, t, diag
        )
        solver2 = pickle.loads(pickle.dumps(solver, -1))
        compare(solver, solver2)

    # Test that models can be pickled too.
    kernel = terms.RealTerm(0.5, 0.1)
    kernel += terms.ComplexTerm(0.6, 0.7, 1.0)
    gp1 = GP(kernel, method=method)
    gp1.compute(t, diag)
    s = pickle.dumps(gp1, -1)
    gp2 = pickle.loads(s)
    assert np.allclose(gp1.log_likelihood(y), gp2.log_likelihood(y))
Example #8
def test_product(seed=42):
    np.random.seed(seed)
    t = np.sort(np.random.uniform(0, 5, 100))
    tau = t[:, None] - t[None, :]

    k1 = terms.RealTerm(log_a=0.1, log_c=0.5)
    k2 = terms.ComplexTerm(0.2, -3.0, 0.5, 0.01)
    k3 = terms.SHOTerm(1.0, 0.2, 3.0)

    K1 = k1.get_value(tau)
    K2 = k2.get_value(tau)
    K3 = k3.get_value(tau)

    assert np.allclose((k1 + k2).get_value(tau), K1 + K2)
    assert np.allclose((k3 + k2).get_value(tau), K3 + K2)
    assert np.allclose((k1 + k2 + k3).get_value(tau), K1 + K2 + K3)

    for (a, b), (A, B) in zip(
            product((k1, k2, k3, k1 + k2, k1 + k3, k2 + k3), (k1, k2, k3)),
            product((K1, K2, K3, K1 + K2, K1 + K3, K2 + K3), (K1, K2, K3))):
        assert np.allclose((a * b).get_value(tau), A * B)
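Example #8 verifies that term composition acts elementwise on the covariance,

$$(k_a + k_b)(\tau) = k_a(\tau) + k_b(\tau), \qquad (k_a k_b)(\tau) = k_a(\tau)\, k_b(\tau),$$

with itertools.product used only to enumerate the (term, matrix) pairings being compared.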
Example #9
    kernel = terms.RealTerm(log_a=0.1, log_c=0.5, bounds=bounds)
    b0 = kernel.get_parameter_bounds()
    assert all(np.allclose(a, b) for a, b in zip(b0, bounds))

    kernel = terms.RealTerm(log_a=0.1,
                            log_c=0.5,
                            bounds=dict(zip(["log_a", "log_c"], bounds)))
    assert all(
        np.allclose(a, b) for a, b in zip(b0, kernel.get_parameter_bounds()))


@pytest.mark.parametrize("k", [
    terms.RealTerm(log_a=0.1, log_c=0.5),
    terms.RealTerm(log_a=0.1, log_c=0.5) +
    terms.RealTerm(log_a=-0.1, log_c=0.7),
    terms.ComplexTerm(log_a=0.1, log_c=0.5, log_d=0.1),
    terms.ComplexTerm(log_a=0.1, log_b=-0.2, log_c=0.5, log_d=0.1),
    terms.SHOTerm(log_S0=0.1, log_Q=-1, log_omega0=0.5),
    terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5),
    terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) +
    terms.RealTerm(log_a=0.1, log_c=0.4),
    terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) *
    terms.RealTerm(log_a=0.1, log_c=0.4),
])
def test_jacobian(k, eps=1.34e-7):
    if not terms.HAS_AUTOGRAD:
        with pytest.raises(ImportError):
            jac = k.get_coeffs_jacobian()
        return

    v = k.get_parameter_vector()
Example #10
from celerite import plot_setup
plot_setup.setup()

# Set up the dimensions of the problem
N = 2**np.arange(6, 20)
times = np.empty((len(N), 3))
times[:] = np.nan

# Simulate a "dataset"
np.random.seed(42)
t = np.sort(np.random.rand(np.max(N)))
yerr = np.random.uniform(0.1, 0.2, len(t))
y = np.sin(t)

# Set up the GP model
kernel = terms.RealTerm(1.0, 0.1) + terms.ComplexTerm(0.1, 2.0, 1.6)
gp = GP(kernel)

for i, n in enumerate(N):
    times[i, 0] = benchmark("gp.compute(t[:{0}], yerr[:{0}])".format(n),
                            "from __main__ import gp, t, yerr")

    gp.compute(t[:n], yerr[:n])
    times[i, 1] = benchmark("gp.log_likelihood(y[:{0}])".format(n),
                            "from __main__ import gp, y")

    if n <= 4096:
        times[i, 2] = benchmark(
            """
C = gp.get_matrix(t[:{0}])
C[np.diag_indices_from(C)] += yerr[:{0}]**2
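This listing is cut off inside the benchmark string, and the benchmark helper itself is not shown in the excerpt. A minimal sketch of one way such a helper could be implemented with timeit, assuming it takes a statement and a setup string and returns the best per-call time in seconds (hypothetical implementation, not the original script's):

import timeit

def benchmark(statement, setup, repeats=3, number=10):
    # Best per-call wall time (seconds) for `statement` under `setup`.
    timer = timeit.Timer(statement, setup=setup)
    return min(timer.repeat(repeat=repeats, number=number)) / number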
Example #11
def data():
    # Generate fake data
    np.random.seed(40582)
    x = np.sort(np.random.uniform(0, 10, 50))
    t = np.sort(np.random.uniform(-1, 12, 100))
    diag = np.random.uniform(0.1, 0.3, len(x))
    y = np.sin(x)
    return x, diag, y, t


test_terms = [
    cterms.RealTerm(log_a=np.log(2.5), log_c=np.log(1.1123)),
    cterms.RealTerm(log_a=np.log(12.345), log_c=np.log(1.5)) +
    cterms.RealTerm(log_a=np.log(0.5), log_c=np.log(1.1234)),
    cterms.ComplexTerm(log_a=np.log(10.0),
                       log_c=np.log(5.6),
                       log_d=np.log(2.1)),
    cterms.ComplexTerm(
        log_a=np.log(7.435),
        log_b=np.log(0.5),
        log_c=np.log(1.102),
        log_d=np.log(1.05),
    ),
    cterms.SHOTerm(log_S0=np.log(1.1),
                   log_Q=np.log(0.1),
                   log_omega0=np.log(1.2)),
    cterms.SHOTerm(log_S0=np.log(1.1),
                   log_Q=np.log(2.5),
                   log_omega0=np.log(1.2)),
    cterms.SHOTerm(
        log_S0=np.log(1.1), log_Q=np.log(2.5), log_omega0=np.log(1.2)) +
Example #12
with open(fn, "w") as f:
    f.write(header)
print(header, end="")

# Simulate a "dataset"
np.random.seed(42)
t = np.sort(np.random.rand(np.max(N)))
yerr = np.random.uniform(0.1, 0.2, len(t))
y = np.sin(t)

for xi, j in enumerate(J):
    kernel = terms.RealTerm(1.0, 0.1)
    for k in range((2*j - 1) % 2):
        kernel += terms.RealTerm(1.0, 0.1)
    for k in range((2*j - 1) // 2):
        kernel += terms.ComplexTerm(0.1, 2.0, 1.6)
    coeffs = kernel.coefficients
    assert 2*j == len(coeffs[0]) + 2*len(coeffs[2]), "Wrong number of terms"

    if args.george:
        george_kernel = None
        for a, c in zip(*(coeffs[:2])):
            k = CeleriteKernel(a=a, b=0.0, c=c, d=0.0)
            george_kernel = k if george_kernel is None else george_kernel + k
        for a, b, c, d in zip(*(coeffs[2:])):
            k = CeleriteKernel(a=a, b=b, c=c, d=d)
            george_kernel = k if george_kernel is None else george_kernel + k
        solver = george.GP(george_kernel, solver=george.HODLRSolver)
    elif args.sparse:
        solver = SparseSolver()
    else:
Example #13
    def get_terms(self):
        coeffs = self.get_complex_coefficients()
        return [terms.ComplexTerm(*(np.log(args))) for args in zip(*coeffs)]
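For reference, the covariance functions behind these log-parameters are the standard celerite terms: a RealTerm contributes

$$k(\tau) = a\, e^{-c\tau},$$

and a ComplexTerm contributes

$$k(\tau) = a\, e^{-c\tau} \cos(d\tau) + b\, e^{-c\tau} \sin(d\tau).$$

That is why this method takes np.log of the linear coefficients (a, b, c, d) before passing them to ComplexTerm, whose constructor expects log-parameters; the same six coefficient arrays (real a, c and complex a, b, c, d) are what kernel.coefficients packs and what the low-level solver.compute calls in Examples #4 and #7 consume.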
Example #14
    # should return gp but check for wn
    return k_out


def neo_update_kernel(theta, params):
    gp = george.GP(mean=0.0, fit_mean=False, white_noise=jitt)
    pass


from celerite import terms as cterms

#  2 or sp.log(10.) ?
T = {
    'Constant': 1.**2,
    'RealTerm': cterms.RealTerm(log_a=2., log_c=2.),
    'ComplexTerm': cterms.ComplexTerm(log_a=2., log_b=2., log_c=2., log_d=2.),
    'SHOTerm': cterms.SHOTerm(log_S0=2., log_Q=2., log_omega0=2.),
    'Matern32Term': cterms.Matern32Term(log_sigma=2., log_rho=2.0),
    'JitterTerm': cterms.JitterTerm(log_sigma=2.0)
}


def neo_term(terms):
    t_out = T[terms[0][0]]
    for f in range(1, len(terms[0])):
        t_out *= T[terms[0][f]]

    for i in range(len(terms)):
Example #15
def test_log_likelihood(seed=42):
    np.random.seed(seed)
    x = np.sort(np.random.rand(10))
    yerr = np.random.uniform(0.1, 0.5, len(x))
    y = np.sin(x)

    kernel = terms.RealTerm(0.1, 0.5)
    gp = GP(kernel)
    with pytest.raises(RuntimeError):
        gp.log_likelihood(y)

    termlist = [(0.1 + 10./j, 0.5 + 10./j) for j in range(1, 4)]
    termlist += [(1.0 + 10./j, 0.01 + 10./j, 0.5, 0.01) for j in range(1, 10)]
    termlist += [(0.6, 0.7, 1.0), (0.3, 0.05, 0.5, 0.6)]
    for term in termlist:
        if len(term) > 2:
            kernel += terms.ComplexTerm(*term)
        else:
            kernel += terms.RealTerm(*term)
        gp = GP(kernel)

        assert gp.computed is False

        with pytest.raises(ValueError):
            gp.compute(np.random.rand(len(x)), yerr)

        gp.compute(x, yerr)
        assert gp.computed is True
        assert gp.dirty is False

        ll = gp.log_likelihood(y)
        K = gp.get_matrix(include_diagonal=True)
        ll0 = -0.5 * np.dot(y, np.linalg.solve(K, y))
        ll0 -= 0.5 * np.linalg.slogdet(K)[1]
        ll0 -= 0.5 * len(x) * np.log(2*np.pi)
        assert np.allclose(ll, ll0)

    # Check that changing the parameters "un-computes" the likelihood.
    gp.set_parameter_vector(gp.get_parameter_vector())
    assert gp.dirty is True
    assert gp.computed is False

    # Check that changing the parameters changes the likelihood.
    gp.compute(x, yerr)
    ll1 = gp.log_likelihood(y)
    params = gp.get_parameter_vector()
    params[0] += 10.0
    gp.set_parameter_vector(params)
    gp.compute(x, yerr)
    ll2 = gp.log_likelihood(y)
    assert not np.allclose(ll1, ll2)

    gp[1] += 10.0
    assert gp.dirty is True
    gp.compute(x, yerr)
    ll3 = gp.log_likelihood(y)
    assert not np.allclose(ll2, ll3)

    # Test zero delta t
    ind = len(x) // 2
    x = np.concatenate((x[:ind], [x[ind]], x[ind:]))
    y = np.concatenate((y[:ind], [y[ind]], y[ind:]))
    yerr = np.concatenate((yerr[:ind], [yerr[ind]], yerr[ind:]))
    gp.compute(x, yerr)
    ll = gp.log_likelihood(y)
    K = gp.get_matrix(include_diagonal=True)
    ll0 = -0.5 * np.dot(y, np.linalg.solve(K, y))
    ll0 -= 0.5 * np.linalg.slogdet(K)[1]
    ll0 -= 0.5 * len(x) * np.log(2*np.pi)
    assert np.allclose(ll, ll0), "face"
Example #16
def test_log_likelihood(with_general, seed=42):
    np.random.seed(seed)
    x = np.sort(np.random.rand(10))
    yerr = np.random.uniform(0.1, 0.5, len(x))
    y = np.sin(x)

    if with_general:
        U = np.vander(x - np.mean(x), 4).T
        V = U * np.random.rand(4)[:, None]
        A = np.sum(U * V, axis=0) + 1e-8
    else:
        A = np.empty(0)
        U = np.empty((0, 0))
        V = np.empty((0, 0))

    # Check quiet argument with a non-positive definite kernel.
    class NPDTerm(terms.Term):
        parameter_names = ("par1", )

        def get_real_coefficients(self, params):  # NOQA
            return [params[0]], [0.1]

    gp = GP(NPDTerm(-1.0))
    with pytest.raises(celerite.solver.LinAlgError):
        gp.compute(x, 0.0)
    with pytest.raises(celerite.solver.LinAlgError):
        gp.log_likelihood(y)
    assert np.isinf(gp.log_likelihood(y, quiet=True))
    if terms.HAS_AUTOGRAD:
        assert np.isinf(gp.grad_log_likelihood(y, quiet=True)[0])

    kernel = terms.RealTerm(0.1, 0.5)
    gp = GP(kernel)
    with pytest.raises(RuntimeError):
        gp.log_likelihood(y)

    termlist = [(0.1 + 10. / j, 0.5 + 10. / j) for j in range(1, 4)]
    termlist += [(1.0 + 10. / j, 0.01 + 10. / j, 0.5, 0.01)
                 for j in range(1, 10)]
    termlist += [(0.6, 0.7, 1.0), (0.3, 0.05, 0.5, 0.6)]
    for term in termlist:
        if len(term) > 2:
            kernel += terms.ComplexTerm(*term)
        else:
            kernel += terms.RealTerm(*term)
        gp = GP(kernel)

        assert gp.computed is False

        with pytest.raises(ValueError):
            gp.compute(np.random.rand(len(x)), yerr)

        gp.compute(x, yerr, A=A, U=U, V=V)
        assert gp.computed is True
        assert gp.dirty is False

        ll = gp.log_likelihood(y)
        K = gp.get_matrix(include_diagonal=True)
        ll0 = -0.5 * np.dot(y, np.linalg.solve(K, y))
        ll0 -= 0.5 * np.linalg.slogdet(K)[1]
        ll0 -= 0.5 * len(x) * np.log(2 * np.pi)
        assert np.allclose(ll, ll0)

    # Check that changing the parameters "un-computes" the likelihood.
    gp.set_parameter_vector(gp.get_parameter_vector())
    assert gp.dirty is True
    assert gp.computed is False

    # Check that changing the parameters changes the likelihood.
    gp.compute(x, yerr, A=A, U=U, V=V)
    ll1 = gp.log_likelihood(y)
    params = gp.get_parameter_vector()
    params[0] += 10.0
    gp.set_parameter_vector(params)
    gp.compute(x, yerr, A=A, U=U, V=V)
    ll2 = gp.log_likelihood(y)
    assert not np.allclose(ll1, ll2)

    gp[1] += 10.0
    assert gp.dirty is True
    gp.compute(x, yerr, A=A, U=U, V=V)
    ll3 = gp.log_likelihood(y)
    assert not np.allclose(ll2, ll3)

    # Test zero delta t
    ind = len(x) // 2
    x = np.concatenate((x[:ind], [x[ind]], x[ind:]))
    y = np.concatenate((y[:ind], [y[ind]], y[ind:]))
    yerr = np.concatenate((yerr[:ind], [yerr[ind]], yerr[ind:]))
    gp.compute(x, yerr)
    ll = gp.log_likelihood(y)
    K = gp.get_matrix(include_diagonal=True)
    ll0 = -0.5 * np.dot(y, np.linalg.solve(K, y))
    ll0 -= 0.5 * np.linalg.slogdet(K)[1]
    ll0 -= 0.5 * len(x) * np.log(2 * np.pi)
    assert np.allclose(ll, ll0)
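The quiet flag exercised in this example is what makes the log-likelihood convenient to use inside an optimizer: instead of raising, it returns -inf for a non-positive-definite system. A minimal sketch of that pattern, reusing the names from the example (the objective function itself is hypothetical and not part of the test):

import numpy as np
from celerite import solver

def neg_log_like(params, gp, x, yerr, y):
    # Return +inf for parameter values that make the system non-positive
    # definite, so an optimizer simply steps away from them.
    gp.set_parameter_vector(params)
    try:
        gp.compute(x, yerr)
    except solver.LinAlgError:
        return np.inf
    return -gp.log_likelihood(y, quiet=True)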