Example #1
    def __init__(self, x, y, yerr, subdivisions):
        """
        Initialize the Gaussian process regression interpolator's instance attributes
    
        Args:
            x (array): Independent variable
            y (array): Dependent variable
            yerr (array): Uncertainty on y
            subdivisions (int): The number of subdivisions between data points
        """

        # Define kernels
        kernel_expsq = 38**2 * kernels.ExpSquaredKernel(metric=10**2)
        kernel_periodic = 150**2 * kernels.ExpSquaredKernel(
            2**2) * kernels.ExpSine2Kernel(gamma=0.05, log_period=np.log(11))
        kernel_poly = 5**2 * kernels.RationalQuadraticKernel(
            log_alpha=np.log(.78), metric=1.2**2)
        kernel_extra = 5**2 * kernels.ExpSquaredKernel(1.6**2)
        kernel = kernel_expsq + kernel_periodic + kernel_poly + kernel_extra

        # Create GP object
        self.gp = george.GP(kernel, mean=np.mean(y), fit_mean=False)
        self.gp.compute(x, yerr)

        # Set instance attributes
        self.ndim = len(self.gp)
        self.x = x
        self.y = y
        self.yerr = yerr
        self.subdivisions = subdivisions
        self.priors = [prior.Prior(0, 1) for i in range(self.ndim)]
        self.x_predict = np.linspace(min(self.x), max(self.x),
                                     subdivisions * (len(self.x) - 1) + 1)
Example #2
def test_bounds():
    kernel = 10 * kernels.ExpSquaredKernel(1.0, metric_bounds=[(None, 4.0)])
    kernel += 0.5 * kernels.RationalQuadraticKernel(log_alpha=0.1, metric=5.0)
    gp = GP(kernel, white_noise=LinearWhiteNoise(1.0, 0.1))

    # Test bounds length.
    assert len(gp.get_parameter_bounds()) == len(gp.get_parameter_vector())
    gp.freeze_all_parameters()
    gp.thaw_parameter("white_noise:m")
    assert len(gp.get_parameter_bounds()) == len(gp.get_parameter_vector())

    # Test invalid bounds specification.
    with pytest.raises(ValueError):
        kernels.ExpSine2Kernel(gamma=0.1, log_period=5.0, bounds=[10.0])
Example #3
File: gpmc.py  Project: nikzadb/GPplus
 def kernel_gp(self, kernelname = 'expsquared'):
     """ Kernel defininition
     Standard kernels provided with george, evtl replaced with manual customizable kernels
     User can change function to add new kernels or comninations thereof as much as required 
     :param kernelname: name of kernel, either 'expsquared', 'matern32', or 'rationalq'
     """
     # Kernel for spatial 2D Gaussian Process. Default Exponential Squared Kernel. Change accordingly below.
     # Kernels are initialised with weight and length (1 by default)
     if kernelname == 'expsquared':
         k0 = 1. * kernels.ExpSquaredKernel(1., ndim = 2)  # + kernels.WhiteKernel(1.,ndim=2)
     # other possible kernels as well as combinations:
     if kernelname == 'matern32':
         k0 = 1. * kernels.Matern32Kernel(1., ndim = 2)
     # k2 = 2.4 ** 2 * kernels.ExpSquaredKernel(90 ** 2) * kernels.ExpSine2Kernel(2.0 / 1.3 ** 2, 1.0)
     if kernelname == 'rationalq':
         k0 =  1. * kernels.RationalQuadraticKernel(0.78, 1.2, ndim=2)
     # k4 = kernels.WhiteKernel(1., ndim=2)
     return k0 # + k1 + k2 + ...
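The method above only constructs and returns the kernel; a minimal sketch of how the returned 2-D kernel could feed a george GP follows. The coords and z arrays are hypothetical stand-ins for real spatial data, and the unit amplitude and length scale mirror the 'expsquared' default in kernel_gp.

import numpy as np
import george
from george import kernels

# Hypothetical 2-D inputs and noisy observations (illustrative only).
coords = np.random.uniform(0.0, 1.0, (50, 2))
z = np.sin(3.0 * coords[:, 0]) + 0.1 * np.random.randn(50)

# Same form as kernel_gp('expsquared'): unit weight, unit length scale, 2-D.
k0 = 1.0 * kernels.ExpSquaredKernel(1.0, ndim=2)

gp = george.GP(k0)
gp.compute(coords, yerr=0.1)   # factorize the covariance matrix
print(gp.log_likelihood(z))    # marginal likelihood of the observations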
Example #4
import numpy as np

from george import kernels

# trans_domain_kernel_mapping is a project-specific lookup table (not shown in this excerpt).

def get_kernel(architecture, kernel_name, domain_name_lst, n_dims):

    if architecture == "trans":
        mapping = trans_domain_kernel_mapping
    else:
        mapping = None

    kernel = None
    initial_ls = np.ones([n_dims])

    if kernel_name == "constant":
        kernel = kernels.ConstantKernel(1, ndim=n_dims)
    elif kernel_name == "polynomial":
        kernel = kernels.PolynomialKernel(log_sigma2=1, order=3, ndim=n_dims)
    elif kernel_name == "linear":
        kernel = kernels.LinearKernel(log_gamma2=1, order=3, ndim=n_dims)
    elif kernel_name == "dotproduct":
        kernel = kernels.DotProductKernel(ndim=n_dims)
    elif kernel_name == "exp":
        kernel = kernels.ExpKernel(initial_ls, ndim=n_dims)
    elif kernel_name == "expsquared":
        kernel = kernels.ExpSquaredKernel(initial_ls, ndim=n_dims)
    elif kernel_name == "matern32":
        kernel = kernels.Matern32Kernel(initial_ls, ndim=n_dims)
    elif kernel_name == "matern52":
        kernel = kernels.Matern52Kernel(initial_ls, ndim=n_dims)
    elif kernel_name == "rationalquadratic":
        kernel = kernels.RationalQuadraticKernel(log_alpha=1,
                                                 metric=initial_ls,
                                                 ndim=n_dims)
    elif kernel_name == "expsine2":
        kernel = kernels.ExpSine2Kernel(1, 2, ndim=n_dims)
    elif kernel_name == "heuristic":
        kernel = mapping[domain_name_lst[0]](ndim=n_dims, axes=0)
        # Start axes at 1 so each additional domain kernel acts on its own axis.
        for i, d in enumerate(domain_name_lst[1:], start=1):
            kernel += mapping[d](ndim=n_dims, axes=i)
    elif kernel_name == "logsquared":
        kernel = kernels.LogSquaredKernel(initial_ls, ndim=n_dims)

    return kernel
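A hedged usage sketch for the factory above: every branch except "heuristic" ignores the architecture and the domain list, so the values passed for them below are placeholders.

import numpy as np
import george

# Illustrative call; "mlp" and the empty domain list only matter for the
# "heuristic" branch, so they are placeholders here.
k = get_kernel(architecture="mlp", kernel_name="matern32",
               domain_name_lst=[], n_dims=3)

gp = george.GP(k)
X = np.random.uniform(0.0, 1.0, (20, 3))   # hypothetical 3-D inputs
gp.compute(X, yerr=0.05)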
Example #5
import numpy as np
import scipy.optimize as op
import matplotlib.pyplot as pl

import george
from george import kernels

# get_ls is a project-specific helper (not shown in this excerpt).

def sample_curve_v2(x, y, err, xsample_range, num_of_curves, filename):
    ls = get_ls(x,y,err)

    k1 = np.var(y)* kernels.ExpSquaredKernel(ls**2)
    k2 = 1 * kernels.RationalQuadraticKernel(log_alpha=1, metric=ls**2)
    kernel = k1 + k2

    gp = george.GP(kernel, fit_mean=True)
    gp.compute(x,err)

    # Define the objective function (negative log-likelihood in this case).
    def nll(p):
        gp.set_parameter_vector(p)
        ll = gp.lnlikelihood(y, quiet=True)
        return -ll if np.isfinite(ll) else 1e25

    # And the gradient of the objective function.
    def grad_nll(p):
        gp.set_parameter_vector(p)
        return -gp.grad_lnlikelihood(y, quiet=True)

    p0 = gp.get_parameter_vector()
    results = op.minimize(nll, p0, jac=grad_nll, method="L-BFGS-B")
    gp.set_parameter_vector(results.x)

    xnew = xsample_range
    ynew = gp.sample_conditional(y, xnew, num_of_curves)

    t = np.linspace(np.min(x), np.max(x), 100)
    mu, cov = gp.predict(y, t)
    std = np.sqrt(np.diag(cov))

    pl.errorbar(x, y, err, fmt='kx')
    pl.plot(t, mu)
    pl.fill_between(t, mu - 2*std, mu + 2*std, color='blue', alpha=0.2)
    pl.savefig(filename+'.pdf')

    return xnew,ynew
Example #6
    def get_kernel(self, kernel_name, i):
        """get individual kernels"""
        metric = (1. / np.exp(self.coeffs[kernel_name]))**2
        if self.gp_code_name == 'george':
            if kernel_name == 'Matern32':
                kernel = kernels.Matern32Kernel(metric,
                                                ndim=self.nkernels,
                                                axes=i)
            if kernel_name == 'ExpSquared':
                kernel = kernels.ExpSquaredKernel(metric,
                                                  ndim=self.nkernels,
                                                  axes=i)
            if kernel_name == 'RationalQuadratic':
                kernel = kernels.RationalQuadraticKernel(log_alpha=1,
                                                         metric=metric,
                                                         ndim=self.nkernels,
                                                         axes=i)
            if kernel_name == 'Exp':
                kernel = kernels.ExpKernel(metric, ndim=self.nkernels, axes=i)

        if self.gp_code_name == 'tinygp':
            if kernel_name == 'Matern32':
                kernel = tinygp.kernels.Matern32(metric,
                                                 ndim=self.nkernels,
                                                 axes=i)
            if kernel_name == 'ExpSquared':
                kernel = tinygp.kernels.ExpSquared(metric,
                                                   ndim=self.nkernels,
                                                   axes=i)
            if kernel_name == 'RationalQuadratic':
                kernel = tinygp.kernels.RationalQuadratic(alpha=1,
                                                          scale=metric,
                                                          ndim=self.nkernels,
                                                          axes=i)
            if kernel_name == 'Exp':
                kernel = tinygp.kernels.Exp(metric, ndim=self.nkernels, axes=i)

        return kernel
Example #7
def test_parameters():
    kernel = 10 * kernels.ExpSquaredKernel(1.0)
    kernel += 0.5 * kernels.RationalQuadraticKernel(log_alpha=0.1, metric=5.0)
    gp = GP(kernel, white_noise=LinearWhiteNoise(1.0, 0.1))

    n = len(gp.get_parameter_vector())
    assert n == len(gp.get_parameter_names())
    assert n - 2 == len(kernel.get_parameter_names())

    gp.freeze_parameter(gp.get_parameter_names()[0])
    assert n - 1 == len(gp.get_parameter_names())
    assert n - 1 == len(gp.get_parameter_vector())

    gp.freeze_all_parameters()
    assert len(gp.get_parameter_names()) == 0
    assert len(gp.get_parameter_vector()) == 0

    gp.kernel.thaw_all_parameters()
    gp.white_noise.thaw_all_parameters()
    assert n == len(gp.get_parameter_vector())
    assert n == len(gp.get_parameter_names())

    assert np.allclose(kernel[0], np.log(10.))
Example #8
import numpy as np
import matplotlib.pyplot as plt

import george
from george import kernels
from statsmodels.datasets import co2

data = co2.load_pandas().data
t = 2000 + (np.array(data.index.to_julian_date()) - 2451545.0) / 365.25
y = np.array(data.co2)
m = np.isfinite(t) & np.isfinite(y) & (t < 1996)
t, y = t[m][::4], y[m][::4]

plt.plot(t, y, ".k")
plt.xlim(t.min(), t.max())
plt.xlabel("year")
plt.ylabel("CO$_2$ in ppm")

#Load kernels
k1 = 66**2 * kernels.ExpSquaredKernel(metric=67**2)
k2 = 2.4**2 * kernels.ExpSquaredKernel(90**2) * kernels.ExpSine2Kernel(
    gamma=2 / 1.3**2, log_period=0.0)
k3 = 0.66**2 * kernels.RationalQuadraticKernel(log_alpha=np.log(0.78),
                                               metric=1.2**2)
k4 = 0.18**2 * kernels.ExpSquaredKernel(1.6**2)
kernel = k1 + k2 + k3 + k4

gp = george.GP(kernel,
               mean=np.mean(y),
               fit_mean=True,
               white_noise=np.log(0.19**2),
               fit_white_noise=True)
gp.compute(t)
print(gp.log_likelihood(y))
print(gp.grad_log_likelihood(y))

import scipy.optimize as op
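The excerpt stops right after importing scipy.optimize; a minimal continuation sketch, assuming the gp and y defined above, would minimize the negative log-likelihood over the kernel parameters, mirroring the optimization pattern in the later examples.

# Continuation sketch (assumes gp and y from the snippet above).
def nll(p):
    gp.set_parameter_vector(p)
    ll = gp.log_likelihood(y, quiet=True)
    return -ll if np.isfinite(ll) else 1e25

def grad_nll(p):
    gp.set_parameter_vector(p)
    return -gp.grad_log_likelihood(y, quiet=True)

result = op.minimize(nll, gp.get_parameter_vector(), jac=grad_nll, method="L-BFGS-B")
gp.set_parameter_vector(result.x)
print(gp.log_likelihood(y))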

Example #9
import logging

import numpy as np
import matplotlib.pyplot as plt

import george
from george import kernels

fig, ax = plt.subplots()
plt.subplots_adjust(left=0.25, bottom=0.40)
x = np.linspace(0.0, 1.0, 100)

k1, k2 = (1e-3, 3.35)
k3 = 0
#kernel = k1 * kernels.ExpKernel(np.exp(k2))
#kernel = k1 * kernels.LinearKernel(log_gamma2=k2, order=3)
#kernel = k1 * kernels.ExpSquaredKernel(metric=k2)
kernel = k1 * kernels.RationalQuadraticKernel(log_alpha=k2, metric=1)

gp = george.GP(kernel, fit_white_noise=False)
#    white_noise=k3,
#    fit_white_noise=True
#    )

gp.compute(x)

l, = ax.plot(x, gp.sample(), c="tab:blue")
l2, = ax.plot(x, gp.sample(), c="tab:blue", alpha=0.6)
l3, = ax.plot(x, gp.sample(), c="tab:blue", alpha=0.3)

axcolor = 'lightgoldenrodyellow'
ax_k1 = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
ax_k2 = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)
Example #10
        
    param_bounds = [
        (0, None),
        (-100, 100),
        (-0.5, 0.5),
        (0, None)
    ]

    get_omega = get_omega_arctan
    
    init_params = np.array([50, 90, -0.25, mean_omega_init])
    
else:


    kernel = 10 * kernels.RationalQuadraticKernel(log_alpha=1, metric=1)

    gp = george.GP(
        kernel,
        mean=mean_omega_init,
        fit_mean=True
    )
    gp.freeze_parameter("kernel:k2:metric:log_M_0_0")
    gp.compute(r)

    def get_omega(params):
        gp.set_parameter_vector(params)
        return gp.sample()

    init_params = gp.get_parameter_vector()
Example #11
# Plot the data.
fig = pl.figure(figsize=(6, 3.5))
ax = fig.add_subplot(111)
ax.plot(t, y, ".k", ms=2)
ax.set_xlim(min(t), 1999)
ax.set_ylim(min(y), 369)
ax.set_xlabel("year")
ax.set_ylabel("CO$_2$ in ppm")
fig.subplots_adjust(left=0.15, bottom=0.2, right=0.99, top=0.95)
fig.savefig("../_static/hyper/data.png", dpi=150)

# Initialize the kernel.
k1 = 66.0**2 * kernels.ExpSquaredKernel(67.0**2)
k2 = 2.4**2 * kernels.ExpSquaredKernel(90**2) \
    * kernels.ExpSine2Kernel(2.0 / 1.3**2, 1.0)
k3 = 0.66**2 * kernels.RationalQuadraticKernel(0.78, 1.2**2)
k4 = 0.18**2 * kernels.ExpSquaredKernel(1.6**2) + kernels.WhiteKernel(0.19)
kernel = k1 + k2 + k3 + k4

# Set up the Gaussian process and maximize the marginalized likelihood.
gp = george.GP(kernel, mean=np.mean(y))

# Define the objective function (negative log-likelihood in this case).
def nll(p):
    # Update the kernel parameters and compute the likelihood.
    gp.kernel[:] = p
    ll = gp.lnlikelihood(y, quiet=True)

    # The scipy optimizer doesn't play well with infinities.
    return -ll if np.isfinite(ll) else 1e25
Example #12
import numpy as np

import george
from george import kernels

from celerite import terms

__all__ = ["george_kernels", "george_solvers",
		"celerite_terms",
		"setup_george_kernel", "setup_celerite_terms"]

george_kernels = {
	"Exp2": kernels.ExpSquaredKernel(10**2),
	"Exp2ESin2": (kernels.ExpSquaredKernel(10**2) *
				kernels.ExpSine2Kernel(2 / 1.3**2, 1.0)),
	"ESin2": kernels.ExpSine2Kernel(2 / 1.3**2, 1.0),
	"27dESin2": kernels.ExpSine2Kernel(2 / 1.3**2, 27.0 / 365.25),
	"RatQ": kernels.RationalQuadraticKernel(0.8, 0.1**2),
	"Mat32": kernels.Matern32Kernel((0.5)**2),
	"Exp": kernels.ExpKernel((0.5)**2),
	# "W": kernels.WhiteKernel,  # deprecated, delegated to `white_noise`
	"B": kernels.ConstantKernel,
}
george_solvers = {
	"basic": george.BasicSolver,
	"HODLR": george.HODLRSolver,
}

celerite_terms = {
	"N": terms.Term(),
	"B": terms.RealTerm(log_a=-6., log_c=-np.inf,
				bounds={"log_a": [-30, 30],
						"log_c": [-np.inf, np.inf]}),
Example #13
import numpy as np
import scipy.optimize as op

import george
from george import kernels

# read_data is a project-specific helper (not shown in this excerpt).

def predict_Q_data(input_dir_2, filename4="training.txt"):

    nu, lam_squared, stokesQ, stokesU = read_data(input_dir_2, filename4)

    nu_R = nu

    stokesQ_R = stokesQ

    lam_R = lam_squared

    # Squared exponential kernel
    k1 = 0.3**2 * kernels.ExpSquaredKernel(0.02**2)

    # periodic covariance kernel with exponential component to allow decay away from periodicity

    k2 = 0.6**2 * kernels.ExpSquaredKernel(0.5**2) * kernels.ExpSine2Kernel(
        gamma=2 / 2.5**2, log_period=0.0)
    # vary gamma value to widen the funnel

    # rational quadratic kernel for medium term irregularities.

    k3 = 0.3**2 * kernels.RationalQuadraticKernel(log_alpha=np.log(0.1),
                                                  metric=0.1**2)

    # noise kernel: includes correlated noise & uncorrelated noise

    k4 = 0.18**2 * kernels.ExpSquaredKernel(1.6**2)

    kernel = k1 + k2  #+k3 + k4

    gp = george.GP(kernel,
                   mean=np.mean(stokesQ),
                   fit_mean=True,
                   white_noise=np.log(0.02**2),
                   fit_white_noise=True)
    #gp = george.GP(kernel)

    gp.compute(lam_R)

    # range of times for prediction:

    x = np.linspace(np.min(lam_squared) - 0.01, np.max(lam_squared),
                    1024)  #extend to smaller wavelengths

    # calculate expectation and variance at each point:
    mu_initial, cov = gp.predict(stokesQ_R, x)
    #mu, cov = gp.predict(stokesQ_2Gz, x, return_var = True)

    std_initial = np.sqrt(np.diag(cov))

    print("Initial prediction of missing Q values: \n {}".format(mu_initial))

    # Define the objective function (negative log-likelihood in this case).
    def nll(p):
        gp.set_parameter_vector(p)
        ll = gp.log_likelihood(stokesQ_R, quiet=True)
        return -ll if np.isfinite(ll) else 1e25

    # And the gradient of the objective function.
    def grad_nll(p):
        gp.set_parameter_vector(p)
        return -gp.grad_log_likelihood(stokesQ_R, quiet=True)

    gp.compute(lam_R)

    p0 = gp.get_parameter_vector()

    results = op.minimize(nll, p0, jac=grad_nll, method="L-BFGS-B")

    # run optimization:
    #results = op.minimize(nll, p0, jac=grad_nll)

    gp.set_parameter_vector(results.x)

    x = np.linspace(np.min(lam_squared) - 0.01, np.max(lam_squared),
                    1024)  #extend to smaller wavelengths

    mu_optimized, cov = gp.predict(stokesQ_R, x)

    std_optimized = np.sqrt(np.diag(cov))

    print("Final prediction of missing Q values: \n {}".format(mu_optimized))

    return nu_R, lam_R, stokesQ_R, mu_initial, std_initial, mu_optimized, std_optimized
Example #14
    plt.xlabel('time [day]')
    plt.show()

#==============================================================================
# GP
#==============================================================================

from george import kernels

k1 = 10 * kernels.ExpSquaredKernel(metric=10**2)
# k2 = 1**2 * kernels.ExpSquaredKernel(90**2) * kernels.ExpSine2Kernel(gamma=1, log_period=1.9)
k2 = 20 * kernels.ExpSquaredKernel(200**2) * kernels.ExpSine2Kernel(
    gamma=2,
    log_period=np.log(17),
    bounds=dict(gamma=(-3, 30), log_period=(np.log(17 - 5), np.log(17 + 5))))
k3 = 20 * kernels.RationalQuadraticKernel(log_alpha=np.log(100), metric=120**2)
k4 = 10 * kernels.ExpSquaredKernel(1000**2)
# kernel = k1 + k2 + k3 + k4
kernel = k3 + k2 + k4
import george
#gp = george.GP(kernel, mean=np.mean(y), fit_mean=True,
#               white_noise=np.log(0.19**2), fit_white_noise=True)
gp = george.GP(kernel, mean=np.mean(y), fit_mean=True)

gp.compute(t, yerr)

#==============================================================================
# Optimization
#==============================================================================
if 0:
    pass  # optimization block not included in this excerpt
Example #15
import sys

import numpy as np
import scipy.optimize as op
import matplotlib.pyplot as plt

import george
from george import kernels


def main():
    k1 = 66.0**2 * kernels.ExpSquaredKernel(67.0**2)
    k2 = 2.4**2 * kernels.ExpSquaredKernel(90**2) * kernels.ExpSine2Kernel(
        2.0 / 1.3**2, 1.0)

    k3 = 0.66**2 * kernels.RationalQuadraticKernel(0.78, 1.2**2)

    k4 = 0.18**2 * kernels.ExpSquaredKernel(1.6**2) + kernels.WhiteKernel(0.19)

    kernel = k2 + k4  #k1 + k2 + k3 + k4

    gp = george.GP(kernel)

    indata = np.loadtxt(
        '/Users/lapguest/newbol/bol_ni_ej/out_files/err_bivar_regress.txt',
        usecols=(5, 6, 1, 2),
        skiprows=1)

    def nll(p):
        # Update the kernel parameters and compute the likelihood.
        gp.kernel[:] = p
        ll = gp.lnlikelihood(indata[:, 2], quiet=True)

        # The scipy optimizer doesn't play well with infinities.
        return -ll if np.isfinite(ll) else 1e25
    # And the gradient of the objective function.
    def grad_nll(p):
        # Update the kernel parameters and compute the likelihood.
        gp.kernel[:] = p
        return -gp.grad_lnlikelihood(indata[:, 2], quiet=True)
        #ph=lc['MJD']-tbmax
        #condition for second maximum
        ##TODO: GUI for selecting region
        #cond=(ph>=10.0) & (ph<=40.0)
        #define the data in the region of interest

    print "Fitting with george"

    #print max(mag)

    # Pre-compute the factorization of the matrix.
    gp.compute(indata[:, 0], indata[:, 2])
    #print gp.lnlikelihood(mag), gp.grad_lnlikelihood(mag)
    gp.compute(indata[:, 0], indata[:, 2])

    if sys.argv[1] == 'mle':
        p0 = gp.kernel.vector
        results = op.minimize(nll, p0, jac=grad_nll)

        gp.kernel[:] = results.x

    print(gp.kernel.value)

    t2 = indata[:, 0]
    t = np.linspace(t2.min(), t2.max(), 100)

    mu, cov = gp.predict(indata[:, 1], t)

    print(gp.predict(indata[:, 1], 31.99)[0])
    std = np.sqrt(np.diag(cov))

    plt.plot(t2, indata[:, 2], 'bo')

    plt.plot(t, mu, 'r:', linewidth=3)
    plt.fill_between(t, mu - std, mu + std, alpha=0.3)
    plt.show()