import numpy as np
import numpy.random as npr

from gp import GP  # GP model under test; import path assumed


def test_gp():
    x = np.linspace(-5, 5, 10)[:, None]  # 10 data points, 1-D
    xtest = np.linspace(-6, 6, 200)[:, None]
    y = np.sin(x.flatten()) + np.sqrt(1e-3) * np.random.randn(x.shape[0])
    ytest = np.sin(xtest.flatten())

    # print 'Inputs'
    # print x
    # print 'Outputs'
    # print y

    data = {'inputs': x, 'values': y}
    pred = {'inputs': xtest, 'values': ytest}

    options = {
        'likelihood': 'GAUSSIAN',
        'mcmc-iters': 500,
        'burn-in': 500,
        'verbose': False,
        'mcmc-diagnostics': True,
        'thinning': 0,
        'priors': {
            'mean':  {'distribution': 'Gaussian',  'parameters': {'mu': 0.0, 'sigma': 1.0}},
            'noise': {'distribution': 'Lognormal', 'parameters': {'scale': 1.0}},
            'amp2':  {'distribution': 'Lognormal', 'parameters': {'scale': 1.0}},
        },
    }

    gp = GP(x.shape[1], **options)
    gp.fit(data)

    # Predictive mean and marginal variance at the test inputs.
    func_m, func_v = gp.predict(pred, full_cov=False, compute_grad=False)
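# A minimal plotting sketch for eyeballing the fit from test_gp(); it is not
# part of the test and assumes matplotlib is installed. Variable names match
# the test above, and func_v is treated as the marginal predictive variance
# (full_cov=False).
import matplotlib.pyplot as plt


def plot_gp_fit(xtest, ytest, func_m, func_v, x=None, y=None):
    """Plot the predictive mean +/- 2 std against the true function."""
    std = np.sqrt(func_v)  # assumed marginal variance per test point
    plt.plot(xtest.flatten(), ytest, 'k--', label='true sin(x)')
    plt.plot(xtest.flatten(), func_m, 'b-', label='GP mean')
    plt.fill_between(xtest.flatten(), func_m - 2 * std, func_m + 2 * std,
                     alpha=0.2, label='+/- 2 std')
    if x is not None:
        plt.plot(x.flatten(), y, 'ro', label='training data')
    plt.legend()
    plt.show()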
def test_predict():
    npr.seed(1)

    N     = 10
    Npend = 3
    Ntest = 2
    D     = 5

    gp   = GP(D, burnin=5, num_fantasies=7)
    pred = npr.rand(Ntest, D)

    # Test with 0 points
    mu, v = gp.predict(pred)
    np.testing.assert_allclose(mu, 0, rtol=1e-7, atol=0, err_msg='', verbose=True)
    np.testing.assert_allclose(v, 1 + 1e-6, rtol=1e-7, atol=0, err_msg='', verbose=True)

    # Test with 1 point
    X = np.zeros((1, D))
    W = npr.randn(D, 1)
    val = X.dot(W).flatten() + np.sqrt(1e-3) * npr.randn()
    gp.fit(X, val, fit_hypers=False)
    mu, v = gp.predict(pred)

    # Points closer to the origin will have less variance
    if np.linalg.norm(pred[0] - X) < np.linalg.norm(pred[1] - X):
        assert v[0] < v[1]
    else:
        assert v[0] > v[1]

    # Predict at the point itself
    mu, v = gp.predict(X)
    np.testing.assert_allclose(mu, val, rtol=1e-5, atol=0, err_msg='', verbose=True)

    # Now let's make sure it doesn't break with more data and pending
    inputs  = npr.rand(N, D)
    vals    = inputs.dot(W).flatten() + np.sqrt(1e-3) * npr.randn(N)
    pending = npr.rand(Npend, D)
    gp.fit(inputs, vals, pending)
    mu, v = gp.predict(pred)

    # Now let's check the gradients
    eps = 1e-5
    mu, v, dmu, dv = gp.predict(pred, compute_grad=True)

    # The implied loss is np.sum(mu**2) + np.sum(v**2); chain rule gives the
    # analytic gradient below.
    dloss = 2 * (dmu * mu[:, np.newaxis, :]).sum(2) \
          + 2 * (v[:, np.newaxis, np.newaxis] * dv).sum(2)

    # Central-difference estimate of the same gradient.
    dloss_est = np.zeros(dloss.shape)
    for i in range(Ntest):
        for j in range(D):
            pred[i, j] += eps
            mu, v = gp.predict(pred)
            loss_1 = np.sum(mu**2) + np.sum(v**2)

            pred[i, j] -= 2 * eps
            mu, v = gp.predict(pred)
            loss_2 = np.sum(mu**2) + np.sum(v**2)

            pred[i, j] += eps
            dloss_est[i, j] = (loss_1 - loss_2) / (2 * eps)

    assert np.linalg.norm(dloss - dloss_est) < 1e-6
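# The gradient check at the end of test_predict() is a standard central-difference
# comparison: for the loss L(X) = sum(mu(X)**2) + sum(v(X)**2), the analytic
# gradient 2*sum(mu * dmu) + 2*sum(v * dv) is compared against
# (L(x + eps) - L(x - eps)) / (2 * eps) coordinate by coordinate. A reusable
# version of that loop might look like this sketch; finite_diff_grad is a
# hypothetical helper, not part of the module under test.
def finite_diff_grad(loss_fn, pred, eps=1e-5):
    """Estimate d loss_fn(pred) / d pred[i, j] via central differences."""
    grad = np.zeros_like(pred)
    for i in range(pred.shape[0]):
        for j in range(pred.shape[1]):
            pred[i, j] += eps
            loss_plus = loss_fn(pred)
            pred[i, j] -= 2 * eps
            loss_minus = loss_fn(pred)
            pred[i, j] += eps  # restore the original value
            grad[i, j] = (loss_plus - loss_minus) / (2 * eps)
    return grad


# Used inside test_predict(), the inner loops would reduce to:
#     def loss(p):
#         mu, v = gp.predict(p)
#         return np.sum(mu**2) + np.sum(v**2)
#     dloss_est = finite_diff_grad(loss, pred)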