def predict(self, sa):
    # Find the K nearest neighbours of the query state-action pair
    idx = self.kdt.query(sa, k=self.K, return_distance=False)
    X_nn = self.Xtrain[idx, :].reshape(self.K, self.state_action_dim)
    Y_nn = self.Ytrain[idx, :].reshape(self.K, self.state_dim)

    mu = np.zeros(self.state_dim)
    sigma = np.zeros(self.state_dim)
    for dim in range(self.state_dim):
        model = pyGPs.GPR_FITC()  # specify model (sparse GP regression)

        # Set number of latent inducing points (Andersson et al. 2015)
        num_u = int(np.fix(15))   # cast so linspace/reshape receive an integer count
        u = np.tile(np.linspace(0, 1, num_u).T, (1, self.state_action_dim))
        u = np.reshape(u, (num_u, self.state_action_dim))

        m = pyGPs.mean.Linear(D=X_nn.shape[1])
        k = pyGPs.cov.RBF(log_ell=5., log_sigma=-5.)
        model.setPrior(mean=m, kernel=k, inducing_points=u)

        # Optimize
        model.setData(X_nn, Y_nn[:, dim])  # fit the model (linear mean & RBF kernel) to the neighbourhood data
        model.getPosterior()
        model.optimize(X_nn, Y_nn[:, dim])  # optimize hyperparameters (default optimizer: single run of Minimize)

        # Predict
        model.predict(sa.reshape(1, self.state_action_dim))
        mu[dim] = model.ym
        sigma[dim] = np.sqrt(model.ys2)

    return mu, sigma
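
# The predict() method above assumes the enclosing class already provides a KD-tree
# (self.kdt) over the training inputs plus the arrays and dimensions it indexes into.
# A minimal constructor sketch, assuming scikit-learn's KDTree; the class name and
# constructor signature are hypothetical, only the attribute names come from predict().
import numpy as np
import pyGPs
from sklearn.neighbors import KDTree

class KNNSparseGPPredictor:
    def __init__(self, Xtrain, Ytrain, K=100):
        self.Xtrain = Xtrain                      # (N, state_action_dim) state-action inputs
        self.Ytrain = Ytrain                      # (N, state_dim) next-state targets
        self.K = K                                # number of nearest neighbours per query
        self.state_action_dim = Xtrain.shape[1]
        self.state_dim = Ytrain.shape[1]
        self.kdt = KDTree(Xtrain, leaf_size=20)   # queried in predict() for the K nearest neighbours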
def test_GPR_FITC(self):
    print("testing GP sparse regression...")
    model = pyGPs.GPR_FITC()
    m = pyGPs.mean.Zero()
    k = pyGPs.cov.RBF()
    model.setPrior(mean=m, kernel=k, inducing_points=self.ur)
    model.setOptimizer("Minimize", num_restarts=10)
    model.optimize(self.xr, self.yr)
    model.predict(self.zr)
    self.checkRegressionOutput(model)
import matplotlib.pyplot as plt
import numpy as np
import pyGPs

demoData = np.load('../../data/regression_data.npz')
x = demoData['x']        # training data
y = demoData['y']        # training target
z = demoData['xstar']    # test data

model_sparse = pyGPs.GPR_FITC()
model_sparse.setData(x, y)
model_sparse.optimize()
model_sparse.predict(z)
model_sparse.plot()

# Training error (relative L2 norm of the residual)
prediction_x = model_sparse.predict(x)[0]
error_x = np.linalg.norm(prediction_x - y, 2) / np.linalg.norm(y, 2)
print('Training Error: %e' % error_x)

# Spectrum
induction_points = model_sparse.u    # inducing inputs chosen by the model
K_mm = model_sparse.covfunc.getCovMatrix(induction_points, induction_points, mode='train')[1]
K_mn = model_sparse.covfunc.getCovMatrix(induction_points, x, mode='cross')
K_nm = np.transpose(K_mn)

def bigLambda(KM, KNM, KMN):
    # element-wise product whose row sums equal diag(K_nm K_mm^{-1} K_mn)
    lamb = np.multiply(KNM, np.transpose(np.linalg.solve(KM, KMN)))
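
# bigLambda above is cut off before it returns. A hedged completion sketch: assuming the
# goal is the FITC diagonal correction Lambda = diag(K_nn - Q_nn) with Q_nn = K_nm K_mm^{-1} K_mn,
# the row sums of the element-wise product being formed give diag(Q_nn). The function name,
# signature, and k_nn_diag argument below are illustrative, not taken from the source.
import numpy as np

def fitc_diag_correction(KM, KNM, k_nn_diag):
    # diag(Q_nn)_i = sum_j (K_nm)_ij * (K_mm^{-1} K_mn)_ji
    q_diag = np.sum(np.multiply(KNM, np.transpose(np.linalg.solve(KM, np.transpose(KNM)))), axis=1)
    # FITC approximates K_nn by Q_nn + diag(K_nn - Q_nn)
    return k_nn_diag - q_diag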
# Load demo data (generated from Gaussians)
#----------------------------------------------------------------------
demoData = np.load('regression_data.npz')
x = demoData['x']        # training data
y = demoData['y']        # training target
z = demoData['xstar']    # test data

#----------------------------------------------------------------------
# Sparse GP regression (FITC) example
#----------------------------------------------------------------------
print("Example 1: default inducing points")

# Start from a new model
model = pyGPs.GPR_FITC()

# Note: if you want to use the default inducing points,
# you MUST call setData(x, y) FIRST!
# The default inducing points form a grid (a hypercube in higher dimensions), where
# each dimension has 5 equidistant values between the min and max of the input data by default.
model.setData(x, y)
# To set the number of grid values per dimension use:
# model.setData(x, y, value_per_axis=10)

model.optimize()
print("Negative log marginal likelihood optimized:", round(model.nlZ, 3))

# Prediction
model.predict(z)
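
# Beyond the default grid, inducing points can also be passed explicitly via setPrior,
# as the earlier snippets do. A minimal sketch for the 1-D demo data; the inducing-point
# locations below are an illustrative choice spanning the input range, not from the source.
u = np.linspace(x.min(), x.max(), 10).reshape(-1, 1)   # 10 inducing inputs over the data range
model2 = pyGPs.GPR_FITC()
model2.setPrior(mean=pyGPs.mean.Zero(), kernel=pyGPs.cov.RBF(), inducing_points=u)
model2.setData(x, y)
model2.optimize()
model2.predict(z)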