def __init__(self, *args, **kwargs):
    unittest.TestCase.__init__(self, *args, **kwargs)
    num_p = 20
    # The Kriging model starts by defining a sampling plan; a random Latin Hypercube (rlh) is used here,
    # with alternative plans left commented out below
    # sp = samplingplan(k=2)
    self.RMSE_mean = []
    self.RMSE_std = []
    self.X = sp.samplingplan().rlh(num_p)
    # self.X = sp.grid(num_p)
    # self.X = sp.MC(num_p)
    # self.X = sp.optimallhc(num_p)
    # Scale the unit-square samples onto the domain [-2, 2] x [-2, 2]
    minx, maxx, miny, maxy = [-2, 2, -2, 2]
    self.X[:, 0] = minx + (maxx - minx) * self.X[:, 0]
    self.X[:, 1] = miny + (maxy - miny) * self.X[:, 1]
    # Evaluate the Branin test function on the sampling plan
    self.testfun = pyKriging.testfunctions().branin
    # self.testfun = pyKriging.testfunctions().rosenbrock
    self.y = self.testfun(self.X)
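# A minimal, self-contained sketch (an assumption, not part of the original test file) of how
# the RMSE_mean / RMSE_std lists set up above might be filled: build the same random-Latin-
# Hypercube plan, fit a model, score it on fresh random points, and record summary statistics.
# It assumes the module-level imports shown here and that pyKriging's kriging.predict()
# accepts one point at a time.
import numpy as np
import pyKriging
from pyKriging import samplingplan as sp
from pyKriging.krige import kriging

num_p = 20
X = sp.samplingplan().rlh(num_p)
minx, maxx, miny, maxy = [-2, 2, -2, 2]
X[:, 0] = minx + (maxx - minx) * X[:, 0]
X[:, 1] = miny + (maxy - miny) * X[:, 1]
testfun = pyKriging.testfunctions().branin
y = testfun(X)

RMSE_mean, RMSE_std = [], []
k = kriging(X, y, testfunction=testfun)
k.train()

rmses = []
for _ in range(3):  # a few repeated draws so the RMSE gets both a mean and a spread
    test_X = np.random.uniform([minx, miny], [maxx, maxy], size=(50, 2))
    preds = np.array([k.predict(list(p)) for p in test_X])
    rmses.append(np.sqrt(np.mean((preds - testfun(test_X)) ** 2)))
RMSE_mean.append(np.mean(rmses))
RMSE_std.append(np.std(rmses))
print(RMSE_mean, RMSE_std)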
#!/usr/bin/env python
import pyKriging
from pyKriging.krige import kriging
from pyKriging.samplingplan import samplingplan

# The Kriging model starts by defining a sampling plan; we use an optimal Latin Hypercube here
sp = samplingplan(2)
X = sp.optimallhc(20)

# Next, we define the problem we would like to solve
testfun = pyKriging.testfunctions().branin
y = testfun(X)

# Now that we have our initial data, we can create an instance of a Kriging model
k = kriging(X, y, testfunction=testfun, name='simple')
k.train()

# Now, five infill points are added. Note that the model is re-trained after each point is added
numiter = 5
for i in range(numiter):
    print('Infill iteration {0} of {1}....'.format(i + 1, numiter))
    newpoints = k.infill(1)
    for point in newpoints:
        k.addPoint(point, testfun(point)[0])
    k.train()

# And plot the results
k.plot()
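# A short usage sketch, not part of the original example: once trained, the surrogate can be
# queried at arbitrary points. This assumes kriging.predict() accepts a single sample in the
# sampling-plan space (here the unit square) and returns the model's estimate at that point;
# the query point below is arbitrary.
print('Model estimate at [0.25, 0.5]:', k.predict([0.25, 0.5]))
print('True value at [0.25, 0.5]:', testfun([0.25, 0.5])[0])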
__author__ = 'cpaulson'
import pyKriging
from pyKriging.krige import kriging
from pyKriging.samplingplan import samplingplan
from pyKriging.CrossValidation import Cross_Validation
from pyKriging.utilities import saveModel

# The Kriging model starts by defining a sampling plan; we use an optimal Latin Hypercube here
sp = samplingplan(2)
X = sp.optimallhc(5)

# Next, we define the problem we would like to solve
testfun = pyKriging.testfunctions().branin

# We generate our observed values based on our sampling plan and the test function
y = testfun(X)

print('Setting up the Kriging Model')
cvMSE = []

# Now that we have our initial data, we can create an instance of a kriging model
k = kriging(X, y, testfunction=testfun, name='simple', testPoints=300)
k.train(optimizer='ga')
k.snapshot()
# cv = Cross_Validation(k)
# cvMSE.append( cv.leave_n_out(q=5)[0] )
k.plot()

# Add infill points one at a time, re-training the model after each addition
for i in range(15):
    print(i)
    newpoints = k.infill(1)
    for point in newpoints:
        k.addPoint(point, testfun(point)[0])
    k.train(optimizer='ga')
    k.snapshot()

# And plot the final results
k.plot()
__author__ = 'cpaulson'
import pyKriging
from pyKriging.krige import kriging
from pyKriging.samplingplan import samplingplan

# The Kriging model starts by defining a sampling plan; we use an optimal Latin Hypercube here
sp = samplingplan(2)
X = sp.optimallhc(15)

# Next, we define the problem we would like to solve
testfun = pyKriging.testfunctions().paulson1
y = testfun(X)

# We can choose between a ga and a pso here
optimizer = 'ga'

# Now that we have our initial data, we can create an instance of a kriging model
print('Setting up the Kriging Model')
k = kriging(X, y, testfunction=testfun, name='simple_ei', testPoints=300)
k.train(optimizer=optimizer)
k.snapshot()

# Add five points, one at a time, based on model error reduction
for i in range(5):
    newpoints = k.infill(1, method='error')
    for point in newpoints:
        print('Adding point {}'.format(point))
        k.addPoint(point, testfun(point)[0])
    k.train(optimizer=optimizer)
    k.snapshot()
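# A hedged continuation sketch, not in the original file: the model name 'simple_ei' suggests
# an expected-improvement phase. Assuming k.infill() also accepts method='ei', a further batch
# of infill points could be added the same way as the error-driven loop above.
for i in range(5):
    newpoints = k.infill(1, method='ei')
    for point in newpoints:
        print('Adding point {}'.format(point))
        k.addPoint(point, testfun(point)[0])
    k.train(optimizer=optimizer)
    k.snapshot()

# Plot the resulting surrogate against the test function
k.plot()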
def interpGrid(self):
    # numpy (as np) and sys are assumed to be imported at module level
    ptx = np.array(self.x)
    pty = np.array(self.y)
    z = np.array(self.z)
    print(len(ptx), 'length x')

    # remove duplicate x values
    dups = self.checkdups(self.x)
    ptx = np.delete(ptx, dups)
    pty = np.delete(pty, dups)
    z = np.delete(z, dups)
    print(len(ptx), 'length x')

    pts = list(zip(self.x, self.y))
    # gridx, gridy = np.mgrid[uprLeft[0]:lwrRight[0]:50j, uprLeft[1]:lwrRight[1]:50j]
    gridx, gridy = np.mgrid[self.ext[0]:self.ext[1]:self.ncol*1j,
                            self.ext[2]:self.ext[3]:self.nrow*1j]

    ##### using griddata #####
    if self.interptype == 'griddata':
        from scipy.interpolate import griddata
        self.grid = griddata(pts, self.z, (gridx, gridy),
                             method='cubic', fill_value=-3e30)

    #### examples from http://stackoverflow.com/questions/24978052/interpolation-over-regular-grid-in-python ####

    ##### using radial basis function ####
    if self.interptype == 'rbf':
        import scipy.interpolate as interpolate
        f = interpolate.Rbf(pty, ptx, z, function='linear')
        self.grid = f(gridy, gridx)

    ##### using kriging ####
    if self.interptype == 'gauss':
        # note: GaussianProcess is the legacy scikit-learn API (removed in newer
        # releases in favour of GaussianProcessRegressor)
        from sklearn.gaussian_process import GaussianProcess
        # print(math.sqrt(np.var(z)))
        # gp = GaussianProcess(theta0=0.1, thetaL=1.1, thetaU=10.1, nugget=0.000001)
        if np.min(z) <= 0:
            thetaL = 0.1
        else:
            thetaL = np.min(z)
        print(np.min(z), thetaL, np.max(z))
        # gp = GaussianProcess(regr='quadratic', corr='cubic', theta0=np.min(z), thetaL=thetaL, thetaU=np.max(z), nugget=0.05)
        gp = GaussianProcess(theta0=500, thetaL=100, thetaU=2000)
        gp.fit(X=np.column_stack([pty, ptx]), y=z)
        rr_cc_as_cols = np.column_stack([gridy.flatten(), gridx.flatten()])
        self.grid = gp.predict(rr_cc_as_cols).reshape((self.ncol, self.nrow))

    if self.interptype == 'krig':
        import pyKriging
        from pyKriging.krige import kriging
        from pyKriging.samplingplan import samplingplan

        # The Kriging model starts by defining a sampling plan, we use an optimal Latin Hypercube here
        # sp = samplingplan(2)
        # X = sp.optimallhc(20)
        # print(X)
        X = np.array(list(zip(self.x, self.y)))
        print(X.shape)

        # Next, we define the problem we would like to solve
        testfun = pyKriging.testfunctions().squared
        # y = testfun(X)
        # print(y)
        y = self.z

        # Now that we have our initial data, we can create an instance of a Kriging model
        k = kriging(X, y)  # , testfunction=testfun, name='simple')
        # k.train()

        # Now, five infill points are added. Note that the model is re-trained after each point is added
        # numiter = 5
        # for i in range(numiter):
        #     print('Infill iteration {0} of {1}....'.format(i + 1, numiter))
        #     newpoints = k.infill(1)
        #     for point in newpoints:
        #         k.addPoint(point, testfun(point)[0])
        #     k.train()

        # And plot the results
        k.plot()
        sys.exit()

    self.grid[self.grid < self.minval] = -2.99999989403e+030  # self.minval
    self.grid = np.flipud(self.grid.T)
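# A hedged sketch, not in the original method: instead of plotting and exiting, the 'krig'
# branch could fill the interpolation grid the way the 'gauss' branch does. It assumes
# pyKriging's kriging.predict() accepts one point at a time; k, gridx, gridy, ncol and nrow
# mirror the names used inside interpGrid above.
def krig_to_grid(k, gridx, gridy, ncol, nrow):
    import numpy as np
    pts = np.column_stack([gridx.flatten(), gridy.flatten()])  # (x, y) order, matching X above
    preds = np.array([k.predict(list(p)) for p in pts])
    return preds.reshape((ncol, nrow))

# Inside the 'krig' branch this could replace `k.plot(); sys.exit()`:
#     self.grid = krig_to_grid(k, gridx, gridy, self.ncol, self.nrow)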