Example #1
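Checks that GP.predict reproduces the exact conditional mean and covariance computed with dense linear algebra, that return_var=True returns the diagonal of that covariance, and that predicting without explicit test points defaults to the training times. The test examples on this page assume imports along the following lines (a sketch; the originals live in celerite's test suite):

import numpy as np
import pytest

import celerite
from celerite import terms, GP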
def test_predict(seed=42):
    np.random.seed(seed)
    x = np.linspace(1, 59, 300)
    t = np.sort(np.random.uniform(10, 50, 100))
    yerr = np.random.uniform(0.1, 0.5, len(t))
    y = np.sin(t)

    kernel = terms.RealTerm(0.1, 0.5)
    for term in [(0.6, 0.7, 1.0), (0.1, 0.05, 0.5, -0.1)]:
        kernel += terms.ComplexTerm(*term)
    gp = GP(kernel)

    gp.compute(t, yerr)
    K = gp.get_matrix(include_diagonal=True)
    Ks = gp.get_matrix(x, t)
    true_mu = np.dot(Ks, np.linalg.solve(K, y))
    true_cov = gp.get_matrix(x, x) - np.dot(Ks, np.linalg.solve(K, Ks.T))

    mu, cov = gp.predict(y, x)

    _, var = gp.predict(y, x, return_var=True)
    assert np.allclose(mu, true_mu)
    assert np.allclose(cov, true_cov)
    assert np.allclose(var, np.diag(true_cov))

    mu0, cov0 = gp.predict(y, t)
    mu, cov = gp.predict(y)
    assert np.allclose(mu0, mu)
    assert np.allclose(cov0, cov)
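For reference, true_mu and true_cov above are the standard Gaussian-process conditional mean and covariance,

$$\mu_* = K_* K^{-1} y, \qquad \Sigma_* = K_{**} - K_* K^{-1} K_*^{\top},$$

where K is the kernel matrix at the training times t (with the noise variances on the diagonal), K_* is the cross-covariance between the test points x and t, and K_{**} is the kernel matrix at the test points.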
Example #2
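Checks numerical stability when the sample spacing is almost exactly at the Nyquist frequency of the kernel's oscillation (f = 1.0), a nearly singular regime for the semiseparable solver, by comparing the fast log-likelihood against the dense computation.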
def test_nyquist_singularity(method, seed=4220):
    np.random.seed(seed)

    kernel = terms.ComplexTerm(1.0, np.log(1e-6), np.log(1.0))
    gp = GP(kernel, method=method)

    # Samples are very close to Nyquist with f = 1.0
    ts = np.array([0.0, 0.5, 1.0, 1.5])
    ts[1] = ts[1] + 1e-9 * np.random.randn()
    ts[2] = ts[2] + 1e-8 * np.random.randn()
    ts[3] = ts[3] + 1e-7 * np.random.randn()

    yerr = np.random.uniform(low=0.1, high=0.2, size=len(ts))
    y = np.random.randn(len(ts))

    gp.compute(ts, yerr)
    llgp = gp.log_likelihood(y)

    K = gp.get_matrix(ts)
    K[np.diag_indices_from(K)] += yerr**2.0

    ll = (-0.5 * np.dot(y, np.linalg.solve(K, y)) -
          0.5 * np.linalg.slogdet(K)[1] - 0.5 * len(y) * np.log(2.0 * np.pi))

    assert np.allclose(ll, llgp)
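The dense reference value here, and in the likelihood tests below, is the multivariate normal log-likelihood

$$\ln \mathcal{L} = -\tfrac{1}{2}\, y^{\top} K^{-1} y - \tfrac{1}{2} \ln \det K - \tfrac{n}{2} \ln 2\pi,$$

with the measurement variances yerr**2 added to the diagonal of K.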
Example #3
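Checks log_likelihood against the dense computation as terms are added to the kernel, and exercises the bookkeeping: calling log_likelihood before compute raises RuntimeError, unsorted input times raise ValueError, and changing any parameter marks the GP dirty and un-computed until compute is called again.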
def test_log_likelihood(method, seed=42):
    np.random.seed(seed)
    x = np.sort(np.random.rand(10))
    yerr = np.random.uniform(0.1, 0.5, len(x))
    y = np.sin(x)

    kernel = terms.RealTerm(0.1, 0.5)
    gp = GP(kernel, method=method)
    with pytest.raises(RuntimeError):
        gp.log_likelihood(y)

    for term in [(0.6, 0.7, 1.0)]:
        kernel += terms.ComplexTerm(*term)
        gp = GP(kernel, method=method)

        assert gp.computed is False

        with pytest.raises(ValueError):
            gp.compute(np.random.rand(len(x)), yerr)

        gp.compute(x, yerr)
        assert gp.computed is True
        assert gp.dirty is False

        ll = gp.log_likelihood(y)
        K = gp.get_matrix(include_diagonal=True)
        ll0 = -0.5 * np.dot(y, np.linalg.solve(K, y))
        ll0 -= 0.5 * np.linalg.slogdet(K)[1]
        ll0 -= 0.5 * len(x) * np.log(2*np.pi)
        assert np.allclose(ll, ll0)

    # Check that changing the parameters "un-computes" the likelihood.
    gp.set_parameter_vector(gp.get_parameter_vector())
    assert gp.dirty is True
    assert gp.computed is False

    # Check that changing the parameters changes the likelihood.
    gp.compute(x, yerr)
    ll1 = gp.log_likelihood(y)
    params = gp.get_parameter_vector()
    params[0] += 0.1
    gp.set_parameter_vector(params)
    gp.compute(x, yerr)
    ll2 = gp.log_likelihood(y)
    assert not np.allclose(ll1, ll2)

    gp[1] += 0.1
    assert gp.dirty is True
    gp.compute(x, yerr)
    ll3 = gp.log_likelihood(y)
    assert not np.allclose(ll2, ll3)
Example #4
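The same likelihood and bookkeeping checks as Example #3, run over a longer list of real and complex terms and extended with a duplicated time stamp to exercise the zero-delta-t code path.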
def test_log_likelihood(seed=42):
    np.random.seed(seed)
    x = np.sort(np.random.rand(10))
    yerr = np.random.uniform(0.1, 0.5, len(x))
    y = np.sin(x)

    kernel = terms.RealTerm(0.1, 0.5)
    gp = GP(kernel)
    with pytest.raises(RuntimeError):
        gp.log_likelihood(y)

    termlist = [(0.1 + 10./j, 0.5 + 10./j) for j in range(1, 4)]
    termlist += [(1.0 + 10./j, 0.01 + 10./j, 0.5, 0.01) for j in range(1, 10)]
    termlist += [(0.6, 0.7, 1.0), (0.3, 0.05, 0.5, 0.6)]
    for term in termlist:
        if len(term) > 2:
            kernel += terms.ComplexTerm(*term)
        else:
            kernel += terms.RealTerm(*term)
        gp = GP(kernel)

        assert gp.computed is False

        with pytest.raises(ValueError):
            gp.compute(np.random.rand(len(x)), yerr)

        gp.compute(x, yerr)
        assert gp.computed is True
        assert gp.dirty is False

        ll = gp.log_likelihood(y)
        K = gp.get_matrix(include_diagonal=True)
        ll0 = -0.5 * np.dot(y, np.linalg.solve(K, y))
        ll0 -= 0.5 * np.linalg.slogdet(K)[1]
        ll0 -= 0.5 * len(x) * np.log(2*np.pi)
        assert np.allclose(ll, ll0)

    # Check that changing the parameters "un-computes" the likelihood.
    gp.set_parameter_vector(gp.get_parameter_vector())
    assert gp.dirty is True
    assert gp.computed is False

    # Check that changing the parameters changes the likelihood.
    gp.compute(x, yerr)
    ll1 = gp.log_likelihood(y)
    params = gp.get_parameter_vector()
    params[0] += 10.0
    gp.set_parameter_vector(params)
    gp.compute(x, yerr)
    ll2 = gp.log_likelihood(y)
    assert not np.allclose(ll1, ll2)

    gp[1] += 10.0
    assert gp.dirty is True
    gp.compute(x, yerr)
    ll3 = gp.log_likelihood(y)
    assert not np.allclose(ll2, ll3)

    # Test zero delta t
    ind = len(x) // 2
    x = np.concatenate((x[:ind], [x[ind]], x[ind:]))
    y = np.concatenate((y[:ind], [y[ind]], y[ind:]))
    yerr = np.concatenate((yerr[:ind], [yerr[ind]], yerr[ind:]))
    gp.compute(x, yerr)
    ll = gp.log_likelihood(y)
    K = gp.get_matrix(include_diagonal=True)
    ll0 = -0.5 * np.dot(y, np.linalg.solve(K, y))
    ll0 -= 0.5 * np.linalg.slogdet(K)[1]
    ll0 -= 0.5 * len(x) * np.log(2*np.pi)
    assert np.allclose(ll, ll0), "face"
Example #5
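Extends the likelihood test with an optional low-rank general component (the A, U, V arguments of compute) and verifies the error handling for a non-positive-definite custom term: compute and log_likelihood raise celerite.solver.LinAlgError, while quiet=True returns an infinite log-likelihood instead of raising.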
def test_log_likelihood(with_general, seed=42):
    np.random.seed(seed)
    x = np.sort(np.random.rand(10))
    yerr = np.random.uniform(0.1, 0.5, len(x))
    y = np.sin(x)

    if with_general:
        U = np.vander(x - np.mean(x), 4).T
        V = U * np.random.rand(4)[:, None]
        A = np.sum(U * V, axis=0) + 1e-8
    else:
        A = np.empty(0)
        U = np.empty((0, 0))
        V = np.empty((0, 0))

    # Check quiet argument with a non-positive definite kernel.
    class NPDTerm(terms.Term):
        parameter_names = ("par1", )

        def get_real_coefficients(self, params):  # NOQA
            return [params[0]], [0.1]

    gp = GP(NPDTerm(-1.0))
    with pytest.raises(celerite.solver.LinAlgError):
        gp.compute(x, 0.0)
    with pytest.raises(celerite.solver.LinAlgError):
        gp.log_likelihood(y)
    assert np.isinf(gp.log_likelihood(y, quiet=True))
    if terms.HAS_AUTOGRAD:
        assert np.isinf(gp.grad_log_likelihood(y, quiet=True)[0])

    kernel = terms.RealTerm(0.1, 0.5)
    gp = GP(kernel)
    with pytest.raises(RuntimeError):
        gp.log_likelihood(y)

    termlist = [(0.1 + 10. / j, 0.5 + 10. / j) for j in range(1, 4)]
    termlist += [(1.0 + 10. / j, 0.01 + 10. / j, 0.5, 0.01)
                 for j in range(1, 10)]
    termlist += [(0.6, 0.7, 1.0), (0.3, 0.05, 0.5, 0.6)]
    for term in termlist:
        if len(term) > 2:
            kernel += terms.ComplexTerm(*term)
        else:
            kernel += terms.RealTerm(*term)
        gp = GP(kernel)

        assert gp.computed is False

        with pytest.raises(ValueError):
            gp.compute(np.random.rand(len(x)), yerr)

        gp.compute(x, yerr, A=A, U=U, V=V)
        assert gp.computed is True
        assert gp.dirty is False

        ll = gp.log_likelihood(y)
        K = gp.get_matrix(include_diagonal=True)
        ll0 = -0.5 * np.dot(y, np.linalg.solve(K, y))
        ll0 -= 0.5 * np.linalg.slogdet(K)[1]
        ll0 -= 0.5 * len(x) * np.log(2 * np.pi)
        assert np.allclose(ll, ll0)

    # Check that changing the parameters "un-computes" the likelihood.
    gp.set_parameter_vector(gp.get_parameter_vector())
    assert gp.dirty is True
    assert gp.computed is False

    # Check that changing the parameters changes the likelihood.
    gp.compute(x, yerr, A=A, U=U, V=V)
    ll1 = gp.log_likelihood(y)
    params = gp.get_parameter_vector()
    params[0] += 10.0
    gp.set_parameter_vector(params)
    gp.compute(x, yerr, A=A, U=U, V=V)
    ll2 = gp.log_likelihood(y)
    assert not np.allclose(ll1, ll2)

    gp[1] += 10.0
    assert gp.dirty is True
    gp.compute(x, yerr, A=A, U=U, V=V)
    ll3 = gp.log_likelihood(y)
    assert not np.allclose(ll2, ll3)

    # Test zero delta t
    ind = len(x) // 2
    x = np.concatenate((x[:ind], [x[ind]], x[ind:]))
    y = np.concatenate((y[:ind], [y[ind]], y[ind:]))
    yerr = np.concatenate((yerr[:ind], [yerr[ind]], yerr[ind:]))
    gp.compute(x, yerr)
    ll = gp.log_likelihood(y)
    K = gp.get_matrix(include_diagonal=True)
    ll0 = -0.5 * np.dot(y, np.linalg.solve(K, y))
    ll0 -= 0.5 * np.linalg.slogdet(K)[1]
    ll0 -= 0.5 * len(x) * np.log(2 * np.pi)
    assert np.allclose(ll, ll0)
Example #6
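A complete application rather than a unit test: fits two Gaussian-process components to a set of K spectra to measure per-epoch velocity shifts, runs an emcee MCMC, and produces diagnostic plots. Besides numpy and matplotlib (pyplot and gridspec), it relies on sys, emcee, corner, scipy.linalg imported as sla, and module-level helpers that are not shown on this page (Fit0_Jitter, Fit2, LP2, Pred2_2D, SPEED_OF_LIGHT).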
def GPSpec_2Comp(wav,
                 flux,
                 flux_err,
                 shifts_in=None,
                 nsteps=2000,
                 nrange=3,
                 prefix='RR2'):
    # NB: input wavelengths should be in nm, flux continuum should be about 1
    K, N = wav.shape
    # Create 2-D array of scaled log wavelengths for fitting
    lwav = np.log(wav * 1e-9)  # in m
    lw0, lw1 = lwav.min(), lwav.max()
    x = (lwav - lw0) / (lw1 - lw0)
    # First do GP fit to individual spectra to get estimate of GP HPs
    print('GP fit to individual spectra')
    HPs = np.zeros((K, 3))
    for i in range(K):
        xx = x[i, :].flatten()
        yy = flux[i, :].flatten()
        ee = flux_err[i, :].flatten()
        HPs[i, :] = Fit0_Jitter(xx, yy, ee, verbose=False)
    HPs = np.median(HPs, axis=0)
    print('GP HPs:', HPs)
    k = terms.Matern32Term(log_sigma=HPs[0], log_rho=HPs[1])
    k += terms.JitterTerm(log_sigma=HPs[2])
    gp1 = GP(k, mean=1.0)
    gp2 = GP(k, mean=1.0)
    # Initial (ML) estimate of parameters
    print "Starting ML fit"
    if shifts_in is None:
        shifts_in = np.zeros(2 * (K - 1))
    par_in = shifts_in / SPEED_OF_LIGHT / (lw1 - lw0)
    ML_par = np.array(Fit2(x, flux, gp1, gp2, verbose=False, par_in=par_in))
    par_ML = np.copy(ML_par)
    par_ML *= (lw1 - lw0) * SPEED_OF_LIGHT * 1e-3
    print "ML fit done"
    # MCMC
    print "Starting MCMC"
    ndim = len(ML_par)
    nwalkers = ndim * 4
    p0 = ML_par + 1e-4 * np.random.randn(nwalkers, ndim)
    sampler = emcee.EnsembleSampler(nwalkers,
                                    ndim,
                                    LP2,
                                    args=[gp1, gp2, x, flux])
    for i, result in enumerate(sampler.sample(p0, iterations=nsteps)):
        # update a simple 30-character progress bar in place
        n = int((30 + 1) * float(i) / nsteps)
        sys.stdout.write("\r[{0}{1}]".format('#' * n, ' ' * (30 - n)))
    sys.stdout.write("\n")
    print("MCMC done")
    # find MAP parameters
    iMAP = np.argmax(sampler.flatlnprobability)
    MAP_par = sampler.flatchain[iMAP, :].flatten()
    # extract MCMC chains
    samples = sampler.chain
    Lprob = sampler.lnprobability
    # convert chains back to physical units: shifts in km/s
    samples_tpl = np.copy(samples)
    samples_tpl *= (lw1 - lw0) * SPEED_OF_LIGHT * 1e-3
    par_MAP = np.copy(MAP_par)
    par_MAP *= (lw1 - lw0) * SPEED_OF_LIGHT * 1e-3
    # parameter names for plots
    labels = []
    for i in range(K - 1):
        labels.append(r'$\delta v^1_{%d}$ (km/s)' % (i + 1))
    for i in range(K - 1):
        labels.append(r'$\delta v^2_{%d}$ (km/s)' % (i + 1))
    labels = np.array(labels)
    names = []
    for i in range(K - 1):
        names.append('dv1_%d (km/s)' % (i + 1))
    for i in range(K - 1):
        names.append('dv2_%d (km/s)' % (i + 1))
    names = np.array(names)
    # Plot the chains
    fig1 = plt.figure(figsize=(12, 2 * (K - 1) + 2))
    gs1 = gridspec.GridSpec(ndim + 1, 1)
    gs1.update(left=0.1, right=0.98, bottom=0.07, top=0.98, hspace=0)
    ax1 = plt.subplot(gs1[0, 0])
    plt.setp(ax1.get_xticklabels(), visible=False)
    plt.plot(Lprob.T, 'k-', alpha=0.2)
    plt.ylabel(r'$\ln P$')
    for i in range(ndim):
        axc = plt.subplot(gs1[i + 1, 0], sharex=ax1)
        if i < (ndim - 1):
            plt.setp(axc.get_xticklabels(), visible=False)
        plt.plot(samples_tpl[:, :, i].T, 'k-', alpha=0.2)
        plt.ylabel(labels[i])
    plt.xlim(0, nsteps)
    plt.xlabel('iteration number')
    # Discard burnout
    nburn = int(input('Enter no. steps to discard as burn-in: '))
    plt.axvline(nburn)
    # Evaluate and print the parameter ranges
    print('\n{:20s}: {:10s} {:10s} {:10s} - {:7s} + {:7s}'.format(
        'Parameter', 'ML', 'MAP', 'Median', 'Error', 'Error'))
    par50 = np.zeros(ndim)
    par84 = np.zeros(ndim)
    par16 = np.zeros(ndim)
    for i in range(ndim):
        sam = samples_tpl[:, :, i].flatten()
        b, m, f = np.percentile(sam, [16, 50, 84])
        par50[i] = m
        par16[i] = b
        par84[i] = f
        print('{:20s}: {:10.5f} {:10.5f} {:10.5f} - {:7.5f} + {:7.5f}'.format(
            names[i], par_ML[i], par_MAP[i], m, m - b, f - m))
    if prefix is None:
        return par_MAP, par50, par50 - par16, par84 - par50
    plt.savefig('%s_chains.png' % prefix)
    samples_flat = samples[:, nburn:, :].reshape(-1, ndim)
    samples_tpl_flat = samples_tpl[:, nburn:, :].reshape(-1, ndim)
    # Plot the parameter distributions
    fig2 = corner.corner(samples_tpl_flat, truths=par_MAP, labels=labels,
                         show_titles=True, quantiles=[0.16, 0.84])
    plt.savefig('%s_corner.png' % prefix)
    # Plot the individual spectra with MAP fit
    xpred = np.copy(x)
    fpred, fpred_err, f1pred, f2pred = Pred2_2D(MAP_par,
                                                gp1,
                                                gp2,
                                                x,
                                                flux,
                                                flux_err,
                                                xpred=xpred)
    lwpred = (lw1 - lw0) * xpred + lw0
    wpred = np.exp(lwpred) * 1e9
    fig3 = plt.figure(figsize=(12, K + 1))
    gs3 = gridspec.GridSpec(K, 1)
    gs3.update(left=0.1, right=0.98, bottom=0.07, top=0.98, hspace=0)
    for i in range(K):
        if i == 0:
            ax1 = plt.subplot(gs3[0, 0])
        else:
            axc = plt.subplot(gs3[i, 0], sharex=ax1, sharey=ax1)
        if i < (K - 1):
            plt.setp(ax1.get_xticklabels(), visible=False)
        plt.plot(wpred[i, :], f1pred[i, :], 'C1')
        plt.plot(wpred[i, :], f2pred[i, :], 'C2')
        plt.fill_between(wpred[i, :], fpred[i, :] + 2 * fpred_err[i, :],
                         fpred[i, :] - fpred_err[i, :], color='C0', alpha=0.4, lw=0)
        plt.plot(wpred[i, :], fpred[i, :], 'C0')
        plt.ylabel('spec. %d' % (i + 1))
        plt.errorbar(wav[i, :], flux[i, :], yerr=flux_err[i, :],
                     fmt=".k", ms=3, mec='none', capsize=0, alpha=0.5, lw=0.5)

        plt.xlim(wav.min(), wav.max())
    plt.xlabel('wavelength (nm)')
    plt.savefig('%s_spectra.png' % prefix)
    # Plot the combined spectra with samples from MCMC chain
    s1 = np.append(0.0, MAP_par[:K - 1])
    x11d = (x + s1[:, None]).flatten()
    lw11d = (lw1 - lw0) * x11d + lw0
    w11d = np.exp(lw11d) * 1e9
    K1 = gp1.get_matrix(x11d)
    s2 = np.append(0.0, MAP_par[K - 1:])
    x21d = (x + s2[:, None]).flatten()
    lw21d = (lw1 - lw0) * x21d + lw0
    w21d = np.exp(lw21d) * 1e9
    K2 = gp2.get_matrix(x21d)
    y1derr = flux_err.flatten()
    Ktot = K1 + K2 + np.diag(y1derr**2)
    y1d = flux.flatten() - 1.0
    y11d = (flux - f2pred).flatten() + 1
    y21d = (flux - f1pred).flatten() + 1
    offset = 1.5 * (y11d.min() - 1)
    L = sla.cho_factor(Ktot)
    b = sla.cho_solve(L, y1d)
    fig4 = plt.figure(figsize=(12, 2 * nrange + 1))
    gs4 = gridspec.GridSpec(nrange, 1)
    gs4.update(left=0.1, right=0.98, bottom=0.07, top=0.98, hspace=0.15)
    ws = min(w11d.min(), w21d.min())
    wr = (max(w11d.max(), w21d.max()) - ws) / float(nrange)
    for i in range(nrange):
        if i == 0:
            ax1 = plt.subplot(gs4[0, 0])
        else:
            axc = plt.subplot(gs4[i, 0], sharey=ax1)
        wmin = ws + (i - 0.05) * wr
        wmax = ws + (i + 1.05) * wr
        l = (w11d >= wmin) * (w11d <= wmax)
        plt.errorbar(w11d[l], y11d[l], yerr = y1derr[l], fmt = ".k", capsize = 0, \
                         alpha = 0.5, ms = 2, mec='none')
        l = (w21d >= wmin) * (w21d <= wmax)
        plt.errorbar(w21d[l], y21d[l] + offset, yerr = y1derr[l], fmt = ".k", capsize = 0, \
                         alpha = 0.5, ms = 2, mec='none')
        wpred = np.linspace(wmin, wmax, 1000)
        lwpred = np.log(wpred * 1e-9)
        xpred = (lwpred - lw0) / (lw1 - lw0)
        # draw indices uniformly over the whole flattened post-burn-in chain
        isamp = np.random.randint(len(samples_flat), size=10)
        for j in isamp:
            samp_params = samples_flat[j, :].flatten()
            s1 = samp_params[:K - 1]
            x1pred = (xpred + s1[:, None]).flatten()
            lw1pred = (lw1 - lw0) * x1pred + lw0
            w1pred = np.exp(lw1pred) * 1e9
            K1s = gp1.get_matrix(x1pred, x11d)
            s2 = samp_params[K - 1:]
            x2pred = (xpred + s2[:, None]).flatten()
            lw2pred = (lw1 - lw0) * x2pred + lw0
            w2pred = np.exp(lw2pred) * 1e9
            K2s = gp2.get_matrix(x2pred, x21d)
            Ks = K1s + K2s
            K1ss = gp1.get_matrix(x1pred)
            K2ss = gp2.get_matrix(x2pred)
            Kss = K1ss + K2ss
            mu1 = np.dot(K1s, b).reshape(x1pred.shape) + 1
            mu2 = np.dot(K2s, b).reshape(x2pred.shape) + 1
            inds1 = np.argsort(w1pred)
            plt.plot(w1pred[inds1], mu1[inds1], 'C0-', lw=0.5, alpha=0.5)
            inds2 = np.argsort(w2pred)
            plt.plot(w2pred[inds2],
                     mu2[inds2] + offset,
                     'C1-',
                     lw=0.5,
                     alpha=0.5)
        plt.xlim(wmin, wmax)
        plt.ylabel('flux')
    plt.xlabel('wavelength (nm)')
    plt.savefig('%s_combined.png' % prefix)
    return par_MAP, par50, par50 - par16, par84 - par50, [
        fig1, fig2, fig3, fig4
    ]
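A hypothetical call, assuming wav, flux and flux_err are (K, N) arrays holding K spectra (wavelengths in nm, continuum-normalized flux), per the comment at the top of the function:

par_MAP, par50, err_lo, err_hi, figs = GPSpec_2Comp(wav, flux, flux_err,
                                                    nsteps=2000, prefix='RR2')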