# Shared imports for the snippets below.
import pickle

import numpy as np
from numpy import array_equal, dot

from george import GP, kernels
from george.solvers import BasicSolver, HODLRSolver


def test_pickle(solver, success, N=50, seed=123):
    np.random.seed(seed)
    kernel = 0.1 * kernels.ExpSquaredKernel(1.5)
    kernel.pars = [1, 2]
    gp = GP(kernel, solver=solver)
    x = np.random.rand(100)
    gp.compute(x, 1e-2)

    # Round-trip the GP through pickle and check that the unpickled
    # object still evaluates the likelihood.
    s = pickle.dumps(gp, -1)
    gp = pickle.loads(s)
    if success:
        # _fake_compute is a test helper defined elsewhere in the suite.
        gp.compute = _fake_compute
        gp.lnlikelihood(np.sin(x))


class GeorgeGP(object):
    """Caching wrapper around a george GP with a two-component kernel."""

    def __init__(self, kernel):
        self.kernel = kernel
        self._gp = GP(kernel._k)
        self._x = None       ## Cached inputs
        self._y = None       ## Cached values
        self._dirty = True   ## Flag telling if the arrays are up to date

    @property
    def is_dirty(self):
        return self._dirty

    def set_dirty(self, is_dirty=True):
        self._dirty = is_dirty

    def set_pv(self, pv=None):
        if pv is not None and not array_equal(pv, self.kernel._pv):
            self.kernel.set_pv(pv)
            self._gp.kernel = self.kernel._k
            self.set_dirty()

    def set_inputs(self, x=None):
        if x is not None and not array_equal(x, self._x):
            self._x = x
            self.set_dirty()

    def _covariance_matrix(self, x1, x2=None, pv=None, separate=False):
        self.set_pv(pv)
        if separate:
            return (self.kernel._k1.value(x1, x2),
                    self.kernel._k2.value(x1, x2))
        else:
            return self.kernel._k.value(x1, x2)

    def compute(self, x=None, pv=None):
        self.set_pv(pv)
        self.set_inputs(x)
        if self.is_dirty:
            self._gp.compute(self._x, yerr=self.kernel._pm[-1], sort=False)
            self.set_dirty(False)

    def negll(self, pv, y=None):
        y = y if y is not None else self._y
        self.compute(self._x, pv)
        return -self._gp.lnlikelihood(y)

    def predict(self, x, mean_only=True):
        return self._gp.predict(self._y, x, mean_only=mean_only)

    def predict_components(self, pv, y, x1, x2=None):
        self.compute(x1, pv)
        b = self._gp.solver.apply_inverse(y)
        K1 = self.kernel._k1.value(x1, x2)
        K2 = self.kernel._k2.value(x1, x2)
        mu_time = dot(K1, b)
        mu_pos = dot(K2, b)
        return mu_time, mu_pos
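

# Usage sketch for GeorgeGP (illustrative only). `TwoComponentKernel` below
# is a hypothetical stand-in for the project's kernel wrapper: GeorgeGP only
# requires an object exposing _k (full kernel), _k1/_k2 (the two components),
# _pv (the raw parameter vector), _pm (natural-scale parameters whose last
# entry is the white-noise level), and a set_pv method. The parameter values
# and data are made up, and the sketch assumes the george 0.2-era API this
# class targets (GP.compute(..., sort=), Kernel.value, predict(mean_only=)).
class TwoComponentKernel(object):
    def __init__(self, pv):
        self.set_pv(pv)

    def set_pv(self, pv):
        self._pv = np.asarray(pv)
        self._pm = np.exp(self._pv)   # log-parameters -> natural scale
        self._k1 = self._pm[0] * kernels.ExpSquaredKernel(self._pm[1])
        self._k2 = self._pm[2] * kernels.Matern32Kernel(self._pm[3])
        self._k = self._k1 + self._k2


pv0 = [0.0, 0.0, -2.0, -1.0, -4.0]   # last entry: ln of the noise level
x = np.linspace(0, 10, 200)
y = np.sin(x) + 0.05 * np.random.randn(200)

gp = GeorgeGP(TwoComponentKernel(pv0))
gp.compute(x, pv0)   # cache the inputs and factorize the covariance
gp._y = y            # negll and predict read the cached values
print(gp.negll(pv0))
mu1, mu2 = gp.predict_components(pv0, y, x)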


def test_gradient(solver, white_noise, seed=123, N=305, ndim=3, eps=1.32e-3):
    np.random.seed(seed)

    # Set up the solver.
    kernel = 1.0 * kernels.ExpSquaredKernel(0.5, ndim=ndim)
    kwargs = dict()
    if white_noise is not None:
        kwargs = dict(white_noise=white_noise, fit_white_noise=True)
    if solver == HODLRSolver:
        kwargs["tol"] = 1e-8
    gp = GP(kernel, solver=solver, **kwargs)

    # Sample some data.
    x = np.random.rand(N, ndim)
    x = x[np.argsort(x[:, 0])]
    y = gp.sample(x)
    gp.compute(x, yerr=0.1)

    # Compute the initial gradient.
    grad0 = gp.grad_log_likelihood(y)

    vector = gp.get_parameter_vector()
    for i, v in enumerate(vector):
        # Compute the centered finite difference approximation to the
        # gradient.
        vector[i] = v + eps
        gp.set_parameter_vector(vector)
        lp = gp.lnlikelihood(y)

        vector[i] = v - eps
        gp.set_parameter_vector(vector)
        lm = gp.lnlikelihood(y)

        vector[i] = v
        gp.set_parameter_vector(vector)

        grad = 0.5 * (lp - lm) / eps
        assert np.abs(grad - grad0[i]) < 5 * eps, \
            "Gradient computation failed in dimension {0} ({1})\n{2}" \
            .format(i, solver.__name__, np.abs(grad - grad0[i]))
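

# Illustrative invocations (normally these run under pytest parametrization).
# The white-noise value is an assumption: george 0.3 treats white_noise as a
# log-variance, so ln(0.1**2) corresponds to a 0.1 noise level. The HODLR
# call additionally assumes george was built with the HODLR solver enabled.
test_gradient(BasicSolver, None)
test_gradient(BasicSolver, white_noise=np.log(0.1 ** 2))
test_gradient(HODLRSolver, None)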


def lnprior(self, pars):
    """
    Smoothing prior using a Gaussian process.

    We will learn the hyperparameters and marginalize over them.
    """
    theta = pars[:self.Nbins]
    if np.any(theta < 0):
        return -np.inf
    a, tau, err = np.exp(pars[self.Nbins:-1])
    mean = pars[-1]

    # The GP treats the bin heights as a smooth function of bin center;
    # dividing by the smoothing factor tempers the strength of the prior.
    kernel = a * kernels.ExpSquaredKernel(tau)
    gp = GP(kernel, mean=mean)
    gp.compute(self.bin_centers, yerr=err)
    return gp.lnlikelihood(theta) / self.smoothing


def lnlike(p, t, y, yerr, solver=BasicSolver):
    # `model(p, t)` is the deterministic mean model, defined elsewhere; the
    # GP with a Matern-3/2 kernel (plus a small constant term) models the
    # correlated residuals around it.
    a, tau = np.exp(p[:2])
    gp = GP(a * kernels.Matern32Kernel(tau) + 0.001, solver=solver)
    gp.compute(t, yerr)
    return gp.lnlikelihood(y - model(p, t))
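

# Hypothetical driver for lnlike above: the straight-line `model`, the data,
# and the starting point are placeholders standing in for the mean model the
# original code defines elsewhere.
from scipy.optimize import minimize


def model(p, t):
    # Placeholder mean model: intercept and slope in p[2:].
    return p[2] + p[3] * t


np.random.seed(42)
t = np.sort(10 * np.random.rand(50))
yerr = 0.05 * np.ones_like(t)
y_obs = model([0.0, 0.0, 0.5, 0.2], t) + yerr * np.random.randn(len(t))

# Maximize the likelihood over [ln a, ln tau, intercept, slope].
result = minimize(lambda p: -lnlike(p, t, y_obs, yerr), [0.0, 0.0, 0.4, 0.25])
print(result.x)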


def lnlike_QP(p, t, y):
    # `kern_QP(p)` builds the quasi-periodic kernel and the white-noise
    # level from the parameter vector; it is defined elsewhere.
    kern, sig = kern_QP(p)
    gp = GP(kern)
    yerr = np.ones(len(y)) * sig
    gp.compute(t, yerr)
    return gp.lnlikelihood(y)
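

# Purely illustrative guess at the helper's shape; the real kern_QP is
# defined elsewhere. A quasi-periodic kernel is commonly the product of a
# periodic term and a squared-exponential decay, which is what the name
# suggests. The parameterization below assumes george >= 0.3, where
# ExpSine2Kernel takes (gamma, log_period).
def kern_QP_example(p):
    ln_amp, ln_tau, ln_gamma, ln_period, ln_sig = p
    kern = np.exp(ln_amp) * kernels.ExpSquaredKernel(np.exp(ln_tau)) \
        * kernels.ExpSine2Kernel(gamma=np.exp(ln_gamma), log_period=ln_period)
    return kern, np.exp(ln_sig)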