Example #1
import numpy as np
import pytest

from celerite import GP, terms


def test_grad_log_likelihood(kernel, seed=42, eps=1.34e-7):
    np.random.seed(seed)
    x = np.sort(np.random.rand(100))
    yerr = np.random.uniform(0.1, 0.5, len(x))
    y = np.sin(x)

    # Without autograd installed, grad_log_likelihood should raise ImportError.
    if not terms.HAS_AUTOGRAD:
        gp = GP(kernel)
        gp.compute(x, yerr)
        with pytest.raises(ImportError):
            _, grad = gp.grad_log_likelihood(y)
        return

    # Compare the analytic gradient to a central finite-difference estimate,
    # both with and without a fitted mean parameter.
    for fit_mean in [True, False]:
        gp = GP(kernel, fit_mean=fit_mean)
        gp.compute(x, yerr)
        _, grad = gp.grad_log_likelihood(y)
        grad0 = np.empty_like(grad)

        v = gp.get_parameter_vector()
        for i, pval in enumerate(v):
            v[i] = pval + eps
            gp.set_parameter_vector(v)
            ll = gp.log_likelihood(y)

            v[i] = pval - eps
            gp.set_parameter_vector(v)
            ll -= gp.log_likelihood(y)

            # Central difference: [ll(p + eps) - ll(p - eps)] / (2 * eps)
            grad0[i] = 0.5 * ll / eps
            v[i] = pval
        assert np.allclose(grad, grad0)
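
A minimal sketch of how the check above could be run directly; the kernel choice is an illustrative assumption (in the test suite the kernel argument would normally be supplied by a pytest parametrization) and the imports at the top of the example are reused:

kernel = terms.RealTerm(log_a=0.1, log_c=0.5)
test_grad_log_likelihood(kernel)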
Example #2
import numpy as np
import pytest

from celerite import GP, terms


def test_grad_log_likelihood(kernel, with_general, seed=42, eps=1.34e-7):
    np.random.seed(seed)
    x = np.sort(np.random.rand(100))
    yerr = np.random.uniform(0.1, 0.5, len(x))
    y = np.sin(x)

    if with_general:
        # Extra general semiseparable component: U and V are the low-rank
        # (rank 4) factors and A its diagonal, offset by 1e-8 for numerical
        # stability.
        U = np.vander(x - np.mean(x), 4).T
        V = U * np.random.rand(4)[:, None]
        A = np.sum(U * V, axis=0) + 1e-8
    else:
        A = np.empty(0)
        U = np.empty((0, 0))
        V = np.empty((0, 0))

    if not terms.HAS_AUTOGRAD:
        gp = GP(kernel)
        gp.compute(x, yerr, A=A, U=U, V=V)
        with pytest.raises(ImportError):
            _, grad = gp.grad_log_likelihood(y)
        return

    for fit_mean in [True, False]:
        gp = GP(kernel, fit_mean=fit_mean)
        gp.compute(x, yerr, A=A, U=U, V=V)
        _, grad = gp.grad_log_likelihood(y)
        grad0 = np.empty_like(grad)

        v = gp.get_parameter_vector()
        for i, pval in enumerate(v):
            v[i] = pval + eps
            gp.set_parameter_vector(v)
            ll = gp.log_likelihood(y)

            v[i] = pval - eps
            gp.set_parameter_vector(v)
            ll -= gp.log_likelihood(y)

            # Central difference: [ll(p + eps) - ll(p - eps)] / (2 * eps)
            grad0[i] = 0.5 * ll / eps
            v[i] = pval
        assert np.allclose(grad, grad0)
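
As above, a hedged sketch of a direct call; the SHOTerm values are illustrative only, and with_general toggles the extra low-rank component:

kernel = terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5)
for with_general in (False, True):
    test_grad_log_likelihood(kernel, with_general)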
Example #3
import numpy as np
import pytest

import celerite.solver
from celerite import GP, terms


def test_log_likelihood(with_general, seed=42):
    np.random.seed(seed)
    x = np.sort(np.random.rand(10))
    yerr = np.random.uniform(0.1, 0.5, len(x))
    y = np.sin(x)

    if with_general:
        # Extra general low-rank component (factors U, V and diagonal A),
        # constructed as in Example #2.
        U = np.vander(x - np.mean(x), 4).T
        V = U * np.random.rand(4)[:, None]
        A = np.sum(U * V, axis=0) + 1e-8
    else:
        A = np.empty(0)
        U = np.empty((0, 0))
        V = np.empty((0, 0))

    # Check quiet argument with a non-positive definite kernel.
    class NPDTerm(terms.Term):
        parameter_names = ("par1", )

        def get_real_coefficients(self, params):  # NOQA
            return [params[0]], [0.1]

    gp = GP(NPDTerm(-1.0))
    with pytest.raises(celerite.solver.LinAlgError):
        gp.compute(x, 0.0)
    with pytest.raises(celerite.solver.LinAlgError):
        gp.log_likelihood(y)
    assert np.isinf(gp.log_likelihood(y, quiet=True))
    if terms.HAS_AUTOGRAD:
        assert np.isinf(gp.grad_log_likelihood(y, quiet=True)[0])

    kernel = terms.RealTerm(0.1, 0.5)
    gp = GP(kernel)
    # The likelihood cannot be evaluated before compute() has been called.
    with pytest.raises(RuntimeError):
        gp.log_likelihood(y)

    # Build up an increasingly complex kernel by adding real and complex
    # terms, checking the likelihood against a dense computation each time.
    termlist = [(0.1 + 10. / j, 0.5 + 10. / j) for j in range(1, 4)]
    termlist += [(1.0 + 10. / j, 0.01 + 10. / j, 0.5, 0.01)
                 for j in range(1, 10)]
    termlist += [(0.6, 0.7, 1.0), (0.3, 0.05, 0.5, 0.6)]
    for term in termlist:
        if len(term) > 2:
            kernel += terms.ComplexTerm(*term)
        else:
            kernel += terms.RealTerm(*term)
        gp = GP(kernel)

        assert gp.computed is False

        # An unsorted time array should be rejected.
        with pytest.raises(ValueError):
            gp.compute(np.random.rand(len(x)), yerr)

        gp.compute(x, yerr, A=A, U=U, V=V)
        assert gp.computed is True
        assert gp.dirty is False

        ll = gp.log_likelihood(y)
        # Dense reference: -0.5 * (y^T K^{-1} y + log|K| + N * log(2 * pi))
        K = gp.get_matrix(include_diagonal=True)
        ll0 = -0.5 * np.dot(y, np.linalg.solve(K, y))
        ll0 -= 0.5 * np.linalg.slogdet(K)[1]
        ll0 -= 0.5 * len(x) * np.log(2 * np.pi)
        assert np.allclose(ll, ll0)

    # Check that changing the parameters "un-computes" the likelihood.
    gp.set_parameter_vector(gp.get_parameter_vector())
    assert gp.dirty is True
    assert gp.computed is False

    # Check that changing the parameters changes the likelihood.
    gp.compute(x, yerr, A=A, U=U, V=V)
    ll1 = gp.log_likelihood(y)
    params = gp.get_parameter_vector()
    params[0] += 10.0
    gp.set_parameter_vector(params)
    gp.compute(x, yerr, A=A, U=U, V=V)
    ll2 = gp.log_likelihood(y)
    assert not np.allclose(ll1, ll2)

    gp[1] += 10.0
    assert gp.dirty is True
    gp.compute(x, yerr, A=A, U=U, V=V)
    ll3 = gp.log_likelihood(y)
    assert not np.allclose(ll2, ll3)

    # Test repeated time stamps (zero delta-t between adjacent samples).
    ind = len(x) // 2
    x = np.concatenate((x[:ind], [x[ind]], x[ind:]))
    y = np.concatenate((y[:ind], [y[ind]], y[ind:]))
    yerr = np.concatenate((yerr[:ind], [yerr[ind]], yerr[ind:]))
    gp.compute(x, yerr)
    ll = gp.log_likelihood(y)
    K = gp.get_matrix(include_diagonal=True)
    ll0 = -0.5 * np.dot(y, np.linalg.solve(K, y))
    ll0 -= 0.5 * np.linalg.slogdet(K)[1]
    ll0 -= 0.5 * len(x) * np.log(2 * np.pi)
    assert np.allclose(ll, ll0)
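
The dense reference computation appears twice in the test above; as a sketch (assuming the imports at the top of this example), it can be factored into a small helper:

def dense_log_likelihood(gp, y):
    # Brute-force Gaussian log likelihood:
    # -0.5 * (y^T K^{-1} y + log|K| + N * log(2 * pi))
    K = gp.get_matrix(include_diagonal=True)
    ll = -0.5 * np.dot(y, np.linalg.solve(K, y))
    ll -= 0.5 * np.linalg.slogdet(K)[1]
    ll -= 0.5 * len(y) * np.log(2 * np.pi)
    return ll

Inside the test, the repeated ll0 blocks would then reduce to a single comparison such as assert np.allclose(gp.log_likelihood(y), dense_log_likelihood(gp, y)).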