class WithinModelComparison(BaseTask):
    """Synthetic benchmark: an objective drawn from a GP prior on [0, 1]^2.

    A Matern-5/2 GP is sampled at 1000 random input points and a
    GaussianProcess model is fit to those samples (without hyperparameter
    optimization). The GP posterior mean, observed with small Gaussian
    noise, is the benchmark objective. The optimum is approximated by the
    best of the 1000 sampled points.

    NOTE(review): this class is defined twice in the file with identical
    bodies; the later definition shadows the earlier one.
    """

    def __init__(self, seed=42):
        """Draw the target function from the GP prior.

        Parameters
        ----------
        seed : int
            Seed for the random number generator, so the sampled function
            (and hence the benchmark) is reproducible.
        """
        X_lower = np.array([0, 0])
        X_upper = np.array([1, 1])
        rng = np.random.RandomState(seed)

        cov_amp = 1.0
        mat_kernel = george.kernels.Matern52Kernel(np.ones([2]) * 0.1, ndim=2)
        kernel = cov_amp * mat_kernel

        # Sample f ~ N(0, K) at 1000 random inputs via a Cholesky factor
        # of the kernel matrix.
        # NOTE(review): if `sla` is scipy.linalg, cholesky() returns the
        # UPPER factor by default; sampling from N(0, K) needs the lower
        # factor (lower=True) — confirm which module `sla` aliases.
        self.xstar = rng.rand(1000, 2)
        K = kernel.value(self.xstar)
        L = sla.cholesky(K)
        sigma = rng.randn(1000)
        self.f = np.dot(L, sigma)

        self.gp = GaussianProcess(kernel, yerr=0.0)
        # The original called train() twice with identical arguments; the
        # redundant duplicate call has been removed.
        self.gp.train(self.xstar, self.f[:, np.newaxis], do_optimize=False)

        # Approximate the global optimum by the best sampled point.
        best = np.argmin(self.f)
        fopt = self.f[best]
        opt = self.xstar[best]

        super(WithinModelComparison, self).__init__(X_lower, X_upper, opt, fopt)

    def objective_function(self, x):
        """Return the GP posterior mean at ``x`` plus N(0, 1e-3) noise."""
        noise = 1e-3 * np.random.randn()
        mu, _ = self.gp.predict(x)
        return mu + noise

    def evaluate_test(self, x):
        """Test-time evaluation; delegates to :meth:`objective_function`."""
        return self.objective_function(x)
class WithinModelComparison(BaseTask):
    """Synthetic benchmark: an objective drawn from a GP prior on [0, 1]^2.

    A Matern-5/2 GP is sampled at 1000 random input points and a
    GaussianProcess model is fit to those samples (without hyperparameter
    optimization). The GP posterior mean, observed with small Gaussian
    noise, is the benchmark objective. The optimum is approximated by the
    best of the 1000 sampled points.

    NOTE(review): this class is defined twice in the file with identical
    bodies; the later definition shadows the earlier one.
    """

    def __init__(self, seed=42):
        """Draw the target function from the GP prior.

        Parameters
        ----------
        seed : int
            Seed for the random number generator, so the sampled function
            (and hence the benchmark) is reproducible.
        """
        X_lower = np.array([0, 0])
        X_upper = np.array([1, 1])
        rng = np.random.RandomState(seed)

        cov_amp = 1.0
        mat_kernel = george.kernels.Matern52Kernel(np.ones([2]) * 0.1, ndim=2)
        kernel = cov_amp * mat_kernel

        # Sample f ~ N(0, K) at 1000 random inputs via a Cholesky factor
        # of the kernel matrix.
        # NOTE(review): if `sla` is scipy.linalg, cholesky() returns the
        # UPPER factor by default; sampling from N(0, K) needs the lower
        # factor (lower=True) — confirm which module `sla` aliases.
        self.xstar = rng.rand(1000, 2)
        K = kernel.value(self.xstar)
        L = sla.cholesky(K)
        sigma = rng.randn(1000)
        self.f = np.dot(L, sigma)

        self.gp = GaussianProcess(kernel, yerr=0.0)
        # The original called train() twice with identical arguments; the
        # redundant duplicate call has been removed.
        self.gp.train(self.xstar, self.f[:, np.newaxis], do_optimize=False)

        # Approximate the global optimum by the best sampled point.
        best = np.argmin(self.f)
        fopt = self.f[best]
        opt = self.xstar[best]

        super(WithinModelComparison, self).__init__(X_lower, X_upper, opt, fopt)

    def objective_function(self, x):
        """Return the GP posterior mean at ``x`` plus N(0, 1e-3) noise."""
        noise = 1e-3 * np.random.randn()
        mu, _ = self.gp.predict(x)
        return mu + noise

    def evaluate_test(self, x):
        """Test-time evaluation; delegates to :meth:`objective_function`."""
        return self.objective_function(x)
def test(self):
    """End-to-end smoke test of the GaussianProcess model on a 1D problem.

    Checks output shapes of predict / sample_functions / predict_variance,
    then verifies the model is usable by every acquisition function and
    every incumbent-estimation method.
    """
    lower = np.array([0])
    upper = np.array([1])

    X = init_random_uniform(lower, upper, 10)
    Y = np.sin(X)

    kernel = george.kernels.Matern52Kernel(np.ones([1]), ndim=1)
    prior = TophatPrior(-2, 2)
    model = GaussianProcess(kernel, prior=prior)
    model.train(X, Y)

    x_test = init_random_uniform(lower, upper, 3)

    # predict(): (n, 1) mean and (n, n) covariance.
    mean, cov = model.predict(x_test)
    assert len(mean.shape) == 2
    assert mean.shape[0] == x_test.shape[0]
    assert mean.shape[1] == 1
    assert len(cov.shape) == 2
    assert cov.shape[0] == x_test.shape[0]
    assert cov.shape[1] == x_test.shape[0]

    # TODO: check gradients

    # sample_functions(): one row per sampled function.
    grid = np.linspace(lower, upper, 10)
    grid = grid[:, np.newaxis]
    samples = model.sample_functions(grid, n_funcs=2)
    assert len(samples.shape) == 2
    assert samples.shape[0] == 2
    assert samples.shape[1] == grid.shape[0]

    # predict_variance(): one row per point of the second argument.
    single_point = np.array([np.random.rand(1)])
    many_points = np.random.rand(10)[:, np.newaxis]
    var = model.predict_variance(single_point, many_points)
    assert len(var.shape) == 2
    assert var.shape[0] == many_points.shape[0]
    assert var.shape[1] == 1

    # The model must work with every acquisition function.
    for acquisition_cls in (EI, PI, LCB, InformationGain):
        acq_func = acquisition_cls(model, X_upper=upper, X_lower=lower)
        acq_func.update(model)
        acq_func(x_test)

    def check_incumbent_shapes(inc, inc_val):
        # Incumbent is a (1, d) point, its value a (1, 1) array.
        assert len(inc.shape) == 2
        assert inc.shape[0] == 1
        assert inc.shape[1] == upper.shape[0]
        assert len(inc_val.shape) == 2
        assert inc_val.shape[0] == 1
        assert inc_val.shape[1] == 1

    # The model must work with every incumbent-estimation method.
    rec = BestObservation(model, lower, upper)
    check_incumbent_shapes(*rec.estimate_incumbent(None))

    rec = PosteriorMeanOptimization(model, lower, upper)
    startpoints = init_random_uniform(lower, upper, 4)
    check_incumbent_shapes(*rec.estimate_incumbent(startpoints))

    rec = PosteriorMeanAndStdOptimization(model, lower, upper)
    startpoints = init_random_uniform(lower, upper, 4)
    check_incumbent_shapes(*rec.estimate_incumbent(startpoints))
class TestGaussianProcess(unittest.TestCase):
    """Unit tests for the GaussianProcess model on a 2D toy problem.

    NOTE(review): this class is defined twice in the file with identical
    bodies; the later definition shadows the earlier one.
    """

    def setUp(self):
        # Random 2D training set. Hyperparameters are NOT optimized and
        # input/output normalization is off, so self.kernel matches the
        # model's kernel exactly in the covariance check below.
        self.X = np.random.rand(10, 2)
        self.y = np.sinc(self.X * 10 - 5).sum(axis=1)
        self.kernel = george.kernels.Matern52Kernel(np.ones(self.X.shape[1]),
                                                    ndim=self.X.shape[1])
        prior = TophatPrior(-2, 2)
        self.model = GaussianProcess(self.kernel, prior=prior,
                                     normalize_input=False,
                                     normalize_output=False)
        self.model.train(self.X, self.y, do_optimize=False)

    def test_predict(self):
        X_test = np.random.rand(10, 2)

        # Marginal prediction: 1D mean and 1D variance per test point.
        m, v = self.model.predict(X_test)
        assert len(m.shape) == 1
        assert m.shape[0] == X_test.shape[0]
        assert len(v.shape) == 1
        assert v.shape[0] == X_test.shape[0]

        # Full-covariance prediction: (n, n) covariance matrix.
        m, v = self.model.predict(X_test, full_cov=True)
        assert len(m.shape) == 1
        assert m.shape[0] == X_test.shape[0]
        assert len(v.shape) == 2
        assert v.shape[0] == X_test.shape[0]
        assert v.shape[1] == X_test.shape[0]

        # Compare against the closed-form GP predictive covariance
        # K_zz - K_zx (K_xx + noise * I)^-1 K_xz.
        K_zz = self.kernel.value(X_test)
        K_zx = self.kernel.value(X_test, self.X)
        K_nz = self.kernel.value(
            self.X) + self.model.noise * np.eye(self.X.shape[0])
        inv = spla.inv(K_nz)
        K_zz_x = K_zz - np.dot(K_zx, np.inner(inv, K_zx))
        # Original threshold was written 10e-5; 1e-4 is the same value.
        assert np.mean((K_zz_x - v)**2) < 1e-4

    def test_sample_function(self):
        # sample_functions() returns one row per sampled function.
        X_test = np.random.rand(8, 2)
        n_funcs = 3
        funcs = self.model.sample_functions(X_test, n_funcs=n_funcs)
        assert len(funcs.shape) == 2
        assert funcs.shape[0] == n_funcs
        assert funcs.shape[1] == X_test.shape[0]

    def test_predict_variance(self):
        # predict_variance() returns one row per point of the second
        # argument and one column per point of the first.
        x_test1 = np.random.rand(1, 2)
        x_test2 = np.random.rand(10, 2)
        var = self.model.predict_variance(x_test1, x_test2)
        assert len(var.shape) == 2
        assert var.shape[0] == x_test2.shape[0]
        assert var.shape[1] == x_test1.shape[0]

    def test_nll(self):
        # 2 length scales + noise.
        theta = np.array([0.2, 0.2, 0.001])
        nll = self.model.nll(theta)
        # The original computed the NLL without checking anything, so the
        # test could never fail; at minimum the value must be finite.
        assert np.all(np.isfinite(nll))

    def test_optimize(self):
        theta = self.model.optimize()
        # Hyperparameters are 2 length scales + noise
        assert theta.shape[0] == 3

    def test_get_incumbent(self):
        # With normalization off, the incumbent is exactly the best
        # observed training point and its raw objective value.
        inc, inc_val = self.model.get_incumbent()
        b = np.argmin(self.y)
        np.testing.assert_almost_equal(inc, self.X[b], decimal=5)
        assert inc_val == self.y[b]
def test(self):
    """End-to-end smoke test of the GaussianProcess model on a 1D problem.

    Checks output shapes of predict / sample_functions / predict_variance,
    then verifies the model is usable by every acquisition function and
    every incumbent-estimation method.
    """
    lower = np.array([0])
    upper = np.array([1])

    X = init_random_uniform(lower, upper, 10)
    Y = np.sin(X)

    kernel = george.kernels.Matern52Kernel(np.ones([1]), ndim=1)
    prior = TophatPrior(-2, 2)
    model = GaussianProcess(kernel, prior=prior)
    model.train(X, Y)

    x_test = init_random_uniform(lower, upper, 3)

    # predict(): (n, 1) mean and (n, n) covariance.
    mean, cov = model.predict(x_test)
    assert len(mean.shape) == 2
    assert mean.shape[0] == x_test.shape[0]
    assert mean.shape[1] == 1
    assert len(cov.shape) == 2
    assert cov.shape[0] == x_test.shape[0]
    assert cov.shape[1] == x_test.shape[0]

    # TODO: check gradients

    # sample_functions(): one row per sampled function.
    grid = np.linspace(lower, upper, 10)
    grid = grid[:, np.newaxis]
    samples = model.sample_functions(grid, n_funcs=2)
    assert len(samples.shape) == 2
    assert samples.shape[0] == 2
    assert samples.shape[1] == grid.shape[0]

    # predict_variance(): one row per point of the second argument.
    single_point = np.array([np.random.rand(1)])
    many_points = np.random.rand(10)[:, np.newaxis]
    var = model.predict_variance(single_point, many_points)
    assert len(var.shape) == 2
    assert var.shape[0] == many_points.shape[0]
    assert var.shape[1] == 1

    # The model must work with every acquisition function.
    for acquisition_cls in (EI, PI, LCB, InformationGain):
        acq_func = acquisition_cls(model, X_upper=upper, X_lower=lower)
        acq_func.update(model)
        acq_func(x_test)

    def check_incumbent_shapes(inc, inc_val):
        # Incumbent is a (1, d) point, its value a (1, 1) array.
        assert len(inc.shape) == 2
        assert inc.shape[0] == 1
        assert inc.shape[1] == upper.shape[0]
        assert len(inc_val.shape) == 2
        assert inc_val.shape[0] == 1
        assert inc_val.shape[1] == 1

    # The model must work with every incumbent-estimation method.
    rec = BestObservation(model, lower, upper)
    check_incumbent_shapes(*rec.estimate_incumbent(None))

    rec = PosteriorMeanOptimization(model, lower, upper)
    startpoints = init_random_uniform(lower, upper, 4)
    check_incumbent_shapes(*rec.estimate_incumbent(startpoints))

    rec = PosteriorMeanAndStdOptimization(model, lower, upper)
    startpoints = init_random_uniform(lower, upper, 4)
    check_incumbent_shapes(*rec.estimate_incumbent(startpoints))
class TestGaussianProcess(unittest.TestCase):
    """Unit tests for the GaussianProcess model on a 2D toy problem.

    NOTE(review): this class is defined twice in the file with identical
    bodies; the later definition shadows the earlier one.
    """

    def setUp(self):
        # Random 2D training set. Hyperparameters are NOT optimized and
        # input/output normalization is off, so self.kernel matches the
        # model's kernel exactly in the covariance check below.
        self.X = np.random.rand(10, 2)
        self.y = np.sinc(self.X * 10 - 5).sum(axis=1)
        self.kernel = george.kernels.Matern52Kernel(np.ones(self.X.shape[1]),
                                                    ndim=self.X.shape[1])
        prior = TophatPrior(-2, 2)
        self.model = GaussianProcess(self.kernel, prior=prior,
                                     normalize_input=False,
                                     normalize_output=False)
        self.model.train(self.X, self.y, do_optimize=False)

    def test_predict(self):
        X_test = np.random.rand(10, 2)

        # Marginal prediction: 1D mean and 1D variance per test point.
        m, v = self.model.predict(X_test)
        assert len(m.shape) == 1
        assert m.shape[0] == X_test.shape[0]
        assert len(v.shape) == 1
        assert v.shape[0] == X_test.shape[0]

        # Full-covariance prediction: (n, n) covariance matrix.
        m, v = self.model.predict(X_test, full_cov=True)
        assert len(m.shape) == 1
        assert m.shape[0] == X_test.shape[0]
        assert len(v.shape) == 2
        assert v.shape[0] == X_test.shape[0]
        assert v.shape[1] == X_test.shape[0]

        # Compare against the closed-form GP predictive covariance
        # K_zz - K_zx (K_xx + noise * I)^-1 K_xz.
        K_zz = self.kernel.value(X_test)
        K_zx = self.kernel.value(X_test, self.X)
        K_nz = self.kernel.value(
            self.X) + self.model.noise * np.eye(self.X.shape[0])
        inv = spla.inv(K_nz)
        K_zz_x = K_zz - np.dot(K_zx, np.inner(inv, K_zx))
        # Original threshold was written 10e-5; 1e-4 is the same value.
        assert np.mean((K_zz_x - v)**2) < 1e-4

    def test_sample_function(self):
        # sample_functions() returns one row per sampled function.
        X_test = np.random.rand(8, 2)
        n_funcs = 3
        funcs = self.model.sample_functions(X_test, n_funcs=n_funcs)
        assert len(funcs.shape) == 2
        assert funcs.shape[0] == n_funcs
        assert funcs.shape[1] == X_test.shape[0]

    def test_predict_variance(self):
        # predict_variance() returns one row per point of the second
        # argument and one column per point of the first.
        x_test1 = np.random.rand(1, 2)
        x_test2 = np.random.rand(10, 2)
        var = self.model.predict_variance(x_test1, x_test2)
        assert len(var.shape) == 2
        assert var.shape[0] == x_test2.shape[0]
        assert var.shape[1] == x_test1.shape[0]

    def test_nll(self):
        # 2 length scales + noise.
        theta = np.array([0.2, 0.2, 0.001])
        nll = self.model.nll(theta)
        # The original computed the NLL without checking anything, so the
        # test could never fail; at minimum the value must be finite.
        assert np.all(np.isfinite(nll))

    def test_optimize(self):
        theta = self.model.optimize()
        # Hyperparameters are 2 length scales + noise
        assert theta.shape[0] == 3

    def test_get_incumbent(self):
        # With normalization off, the incumbent is exactly the best
        # observed training point and its raw objective value.
        inc, inc_val = self.model.get_incumbent()
        b = np.argmin(self.y)
        np.testing.assert_almost_equal(inc, self.X[b], decimal=5)
        assert inc_val == self.y[b]