# Assumes: import george; from george import kernels
def lnlike_gp(self):
    """Return the GP log-likelihood of the residuals (y - model)."""
    if (self.t is None) or (self.y is None):
        raise ValueError(
            "Data is not properly initialized. Received Nones.")
    elif len(self.t) == 1:
        raise ValueError(
            "Time data is not properly initialized. "
            "Expected array of size greater than 1.")
    else:
        t, y = self.t, self.y
        if self.kernel_type == "Standard":
            # Squared-exponential kernel plus a white-noise term.
            kernel = (1. * kernels.ExpSquaredKernel(5.)
                      + kernels.WhiteKernel(2.))
            gp = george.GP(kernel, mean=self.meanfnc)
            gp.compute(t, self.yerr1)
            return gp.lnlikelihood(y - self.model)
        else:
            # Wrap a user-supplied Python function as the kernel.
            kernel = kernels.PythonKernel(self.kernelfnc)
            gp = george.GP(kernel, mean=self.meanfnc)
            gp.compute(t)
            return gp.lnlikelihood(y - self.model)
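# --- Usage sketch (assumption, not from the source) -----------------------
# Minimal wiring of lnlike_gp into a class. The class name GPModel, the
# constructor, and the synthetic data are hypothetical; the attribute names
# (t, y, yerr1, model, meanfnc, kernel_type, kernelfnc) are the ones the
# method above expects.
import numpy as np
import george
from george import kernels


class GPModel(object):
    lnlike_gp = lnlike_gp  # reuse the module-level method defined above

    def __init__(self, t, y, yerr1, model, meanfnc=0.0,
                 kernel_type="Standard", kernelfnc=None):
        self.t, self.y, self.yerr1 = t, y, yerr1
        self.model = model          # deterministic model evaluated at t
        self.meanfnc = meanfnc      # mean passed to george.GP
        self.kernel_type = kernel_type
        self.kernelfnc = kernelfnc  # only used for the PythonKernel branch


t = np.linspace(0, 10, 50)
y = np.sin(t) + 0.1 * np.random.randn(50)
m = GPModel(t, y, yerr1=0.1 * np.ones_like(t), model=np.sin(t))
print(m.lnlike_gp())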
import numpy as np
from george import kernels


def angus_kernel(theta):
    """
    Use the kernel that Ruth Angus uses. Be sure to cite her.
    """
    theta = np.exp(theta)
    A = theta[0]      # amplitude
    l = theta[1]      # squared-exponential length scale
    G = theta[2]      # gamma of the periodic (ExpSine2) term
    P = theta[3]      # period
    sigma = theta[4]  # white-noise amplitude
    kernel = (A * kernels.ExpSquaredKernel(l)
              * kernels.ExpSine2Kernel(G, P)
              + kernels.WhiteKernel(sigma))
    return kernel
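# --- Usage sketch (assumption, not from the source) -----------------------
# Quick check of the kernel builder on synthetic data. The log-parameter
# values are made up; order is ln(A), ln(l), ln(G), ln(P), ln(sigma).
import numpy as np
import george

theta = np.log([1.0, 2.0, 0.5, 3.0, 0.1])
kernel = angus_kernel(theta)

t = np.linspace(0, 10, 100)
y = np.sin(2 * np.pi * t / 3.0) + 0.1 * np.random.randn(100)
gp = george.GP(kernel)
gp.compute(t)
print(gp.lnlikelihood(y))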
# Assumes: import numpy as np; import matplotlib.pyplot as pl;
# import george; from george import kernels; and (t, y) holding the
# CO2 concentration time series plotted below.
fig = pl.figure(figsize=(6, 3.5))
ax = fig.add_subplot(111)
ax.plot(t, y, ".k", ms=2)
ax.set_xlim(min(t), 1999)
ax.set_ylim(min(y), 369)
ax.set_xlabel("year")
ax.set_ylabel("CO$_2$ in ppm")
fig.subplots_adjust(left=0.15, bottom=0.2, right=0.99, top=0.95)
fig.savefig("../_static/hyper/data.png", dpi=150)

# Initialize the kernel: long-term trend, quasi-periodic seasonal term,
# medium-term irregularities, and short-scale noise.
k1 = 66.0**2 * kernels.ExpSquaredKernel(67.0**2)
k2 = 2.4**2 * kernels.ExpSquaredKernel(90**2) \
    * kernels.ExpSine2Kernel(2.0 / 1.3**2, 1.0)
k3 = 0.66**2 * kernels.RationalQuadraticKernel(0.78, 1.2**2)
k4 = 0.18**2 * kernels.ExpSquaredKernel(1.6**2) + kernels.WhiteKernel(0.19)
kernel = k1 + k2 + k3 + k4

# Set up the Gaussian process and maximize the marginalized likelihood.
gp = george.GP(kernel, mean=np.mean(y))

# Define the objective function (negative log-likelihood in this case).
def nll(p):
    # Update the kernel parameters and compute the likelihood.
    gp.kernel[:] = p
    ll = gp.lnlikelihood(y, quiet=True)
    # The scipy optimizer doesn't play well with infinities.
    return -ll if np.isfinite(ll) else 1e25

# And the gradient of the objective function.
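# --- Completion sketch (assumption, not from the source) -------------------
# The snippet breaks off right after the comment promising the gradient.
# A minimal completion, mirroring the grad_nll / op.minimize pattern used
# in the next example; assumes scipy.optimize is imported as op.
def grad_nll(p):
    # Update the kernel parameters and return the gradient of -lnlike.
    gp.kernel[:] = p
    return -gp.grad_lnlikelihood(y, quiet=True)

# Pre-compute the factorization of the matrix, then optimize.
gp.compute(t)
p0 = gp.kernel.vector
results = op.minimize(nll, p0, jac=grad_nll)
gp.kernel[:] = results.x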
import sys

import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as op

import george
from george import kernels


def main():
    # Kernels from the CO2 example; only k2 (quasi-periodic) and k4
    # (short-scale + white noise) are used in the fit.
    k1 = 66.0**2 * kernels.ExpSquaredKernel(67.0**2)
    k2 = 2.4**2 * kernels.ExpSquaredKernel(90**2) * kernels.ExpSine2Kernel(
        2.0 / 1.3**2, 1.0)
    k3 = 0.66**2 * kernels.RationalQuadraticKernel(0.78, 1.2**2)
    k4 = 0.18**2 * kernels.ExpSquaredKernel(1.6**2) + kernels.WhiteKernel(0.19)
    kernel = k2 + k4  # k1 + k2 + k3 + k4
    gp = george.GP(kernel)

    indata = np.loadtxt(
        '/Users/lapguest/newbol/bol_ni_ej/out_files/err_bivar_regress.txt',
        usecols=(5, 6, 1, 2), skiprows=1)

    def nll(p):
        # Update the kernel parameters and compute the likelihood.
        gp.kernel[:] = p
        ll = gp.lnlikelihood(indata[:, 2], quiet=True)
        # The scipy optimizer doesn't play well with infinities.
        return -ll if np.isfinite(ll) else 1e25

    # And the gradient of the objective function.
    def grad_nll(p):
        # Update the kernel parameters and compute the gradient.
        gp.kernel[:] = p
        return -gp.grad_lnlikelihood(indata[:, 2], quiet=True)

    #ph=lc['MJD']-tbmax
    #condition for second maximum
    ##TODO: GUI for selecting region
    #cond=(ph>=10.0) & (ph<=40.0)
    #define the data in the region of interest
    print("Fitting with george")

    # Pre-compute the factorization of the matrix.
    gp.compute(indata[:, 0], indata[:, 2])

    if sys.argv[1] == 'mle':
        p0 = gp.kernel.vector
        results = op.minimize(nll, p0, jac=grad_nll)
        gp.kernel[:] = results.x
        print(gp.kernel.value)

    t2 = indata[:, 0]
    t = np.linspace(t2.min(), t2.max(), 100)
    mu, cov = gp.predict(indata[:, 1], t)
    print(gp.predict(indata[:, 1], 31.99)[0])
    std = np.sqrt(np.diag(cov))

    plt.plot(t2, indata[:, 2], 'bo')
    plt.plot(t, mu, 'r:', linewidth=3)
    plt.fill_between(t, mu - std, mu + std, alpha=0.3)
    plt.show()
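# --- Entry point (assumption, not from the source) -------------------------
# The example defines main() but never calls it; a conventional guard makes
# the script runnable. It expects a mode argument on the command line (the
# file name here is hypothetical):
#
#     $ python fit_gp.py mle
#
if __name__ == "__main__":
    main()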