def test1():
    sp = samplingplan(2)
    x = []
    for n in range(0, 4):
        x.append(np.random.rand(2) * 2 - 1)
    x = np.array(x) * 2

    # Next, we define the problem we would like to solve
    testfun = func
    y = testfun(x)

    # Now that we have our initial data, we can create an instance of a Kriging model
    k = kriging(x, y, testfunction=testfun, name='simple')
    k.train()

    # Now, ten infill points are added. Note that the model is re-trained after each point is added
    numiter = 10
    for i in range(numiter):
        print('Infill iteration {0} of {1}....'.format(i + 1, numiter))
        newpoints = k.infill(1)
        for point in newpoints:
            k.addPoint(point, testfun([point])[0])
        k.train()

    # And plot the results
    k.plot()
def krig_sp():
    # (assumes module-level imports: numpy as np, numpy.linalg as la,
    # matplotlib.pyplot as plt, plus the local pots and spalgo modules)
    # Initial random move
    norm = 2 * (np.random.rand(2) - 0.5)
    norm /= la.norm(norm)
    path = [np.array(norm) / 10]

    # Start krig model with origin and random displacement
    sp = samplingplan(len(norm))
    hc = 2 * (sp.optimallhc(2**len(norm)) - 0.5)
    krig_init = [[0.0, 0.0]]
    for p in hc:
        krig_init.append(p)
    krig_init = np.array(krig_init)
    k = kriging(krig_init, func(krig_init), testfunction=func, name='simple')
    k.train()
    k.plot()

    last_found = norm
    for n in range(0, 100):
        plt.subplot(221)
        pots.plot(k.predict)
        found = []
        weights = []
        for _ in range(0, 10):
            path = spalgo.act_relax_fd(k.predict, max_range=1.2, max_step=0.01, max_iter=1000)
            plt.plot(*zip(*path))
            pt = np.array(path[-1])
            # Cluster relaxation endpoints: nearby endpoints vote for the same minimum
            duplicate = False
            for i, p in enumerate(found):
                if la.norm(pt - p) < 0.1:
                    weights[i] += 1
                    duplicate = True
                    break
            if duplicate:
                continue
            found.append(pt)
            weights.append(1)
        imax = weights.index(max(weights))
        last_found = found[imax]
        plt.plot([last_found[0]], [last_found[1]], marker="+")

        plt.subplot(222)
        pots.plot(pots.pot)
        plt.plot(*zip(*path))

        # Add the consensus minimum and its evaluated value to the model
        k.addPoint(last_found, func([last_found])[0])
        k.train()
        plt.draw()
        plt.pause(0.1)
        plt.clf()
def __init__(self, X, y, testfunction=None, name='', testPoints=None, **kwargs):
    self.X = copy.deepcopy(X)
    self.y = copy.deepcopy(y)
    self.testfunction = testfunction
    self.name = name
    self.n = self.X.shape[0]
    self.k = self.X.shape[1]
    self.theta = np.ones(self.k)
    self.pl = np.ones(self.k) * 2.
    self.sigma = 0
    self.normRange = []
    self.ynormRange = []
    self.normalizeData()
    self.sp = samplingplan.samplingplan(self.k)
    # self.updateData()
    # self.updateModel()
    self.thetamin = 1e-5
    self.thetamax = 100
    self.pmin = 1
    self.pmax = 2

    # Setup for tracking history
    self.history = {}
    self.history['points'] = []
    self.history['neglnlike'] = []
    self.history['theta'] = []
    self.history['p'] = []
    self.history['rsquared'] = [0]
    self.history['adjrsquared'] = [0]
    self.history['chisquared'] = [1000]
    self.history['lastPredictedPoints'] = []
    self.history['avgMSE'] = []
    if testPoints:
        self.history['pointData'] = []
        self.testPoints = self.sp.rlh(testPoints)
        for point in self.testPoints:
            testPrimitive = {}
            testPrimitive['point'] = point
            if self.testfunction:
                testPrimitive['actual'] = self.testfunction(point)[0]
            else:
                testPrimitive['actual'] = None
            testPrimitive['predicted'] = []
            testPrimitive['mse'] = []
            testPrimitive['gradient'] = []
            self.history['pointData'].append(testPrimitive)
    else:
        self.history['pointData'] = None

    matrixops.__init__(self)
def __init__(self, X, y, testfunction=None, name='', testPoints=None, **kwargs):
    self.X = copy.deepcopy(X)
    self.y = copy.deepcopy(y)
    self.testfunction = testfunction
    self.name = name
    self.n = self.X.shape[0]
    self.k = self.X.shape[1]
    self.theta = np.ones(self.k)
    self.pl = np.ones(self.k) * 2.
    self.sigma = 0
    self.normRange = []
    self.ynormRange = []
    self.normalizeData()
    self.sp = samplingplan.samplingplan(self.k)
    self.updateData()
    self.updateModel()
    self.thetamin = 1e-5
    self.thetamax = 100
    self.pmin = 1
    self.pmax = 2

    # Setup for tracking history
    self.history = {}
    self.history['points'] = []
    self.history['neglnlike'] = []
    self.history['theta'] = []
    self.history['p'] = []
    self.history['rsquared'] = [0]
    self.history['adjrsquared'] = [0]
    self.history['chisquared'] = [1000]
    self.history['lastPredictedPoints'] = []
    self.history['avgMSE'] = []
    if testPoints:
        self.history['pointData'] = []
        self.testPoints = self.sp.rlh(testPoints)
        for point in self.testPoints:
            testPrimitive = {}
            testPrimitive['point'] = point
            if self.testfunction:
                testPrimitive['actual'] = self.testfunction(point)[0]
            else:
                testPrimitive['actual'] = None
            testPrimitive['predicted'] = []
            testPrimitive['mse'] = []
            testPrimitive['gradient'] = []
            self.history['pointData'].append(testPrimitive)
    else:
        self.history['pointData'] = None

    matrixops.__init__(self)
def generate_sample_plan(self, point_count, dimension, bounds, base=None):
    from pyKriging.samplingplan import samplingplan
    sp = samplingplan(dimension)
    X = sp.optimallhc(point_count)
    norm_point = X
    points = []
    for i in range(0, point_count):
        scaled_point = []
        for d in range(0, dimension):
            # Scale each unit-hypercube coordinate into [lower, upper] for dimension d
            scaled_point.append(bounds[d][0] + (norm_point[i][d] * (bounds[d][1] - bounds[d][0])))
        points.append(scaled_point)
    return points
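# Usage sketch (hypothetical, not from the original code): the method above never
# touches `self`, so it can be exercised standalone by passing None once importable:
#   bounds = [(-2.0, 2.0), (0.0, 1.0)]            # (lower, upper) per dimension
#   points = generate_sample_plan(None, 4, 2, bounds)
#   # -> 4 points, each scaled from [0,1]^2 into the given bounds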
def __init__(self, *args, **kwargs):
    unittest.TestCase.__init__(self, *args, **kwargs)
    num_p = 20
    # The Kriging model starts by defining a sampling plan; a random Latin Hypercube
    # (rlh) is used here, with alternatives commented out below
    # sp = samplingplan(k=2)
    self.RMSE_mean = []
    self.RMSE_std = []
    self.X = sp.samplingplan().rlh(num_p)
    # self.X = sp.grid(num_p)
    # self.X = sp.MC(num_p)
    # self.X = sp.optimallhc(num_p)
    # Scale the unit-hypercube sample onto [-2, 2] x [-2, 2]
    minx, maxx, miny, maxy = [-2, 2, -2, 2]
    self.X[:, 0] = minx + (maxx - minx) * self.X[:, 0]
    self.X[:, 1] = miny + (maxy - miny) * self.X[:, 1]
    self.testfun = pyKriging.testfunctions().branin
    # self.testfun = pyKriging.testfunctions().rosenbrock
    self.y = self.testfun(self.X)
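# A hedged sketch of a test method that could follow this fixture; it assumes the
# imports used elsewhere in these examples (numpy as np, pyKriging.krige.kriging)
# and is not part of the original test class.
def testRMSE(self):
    # Train on the fixture data and check the model reproduces its own samples
    k = kriging(self.X, self.y, testfunction=self.testfun)
    k.train()
    preds = np.array([k.predict(list(p)) for p in self.X])
    rmse = np.sqrt(np.mean((preds - self.y) ** 2))
    self.RMSE_mean.append(rmse)
    self.assertTrue(np.isfinite(rmse))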
def initial_create(self):
    '''
    Defines and evaluates the initial seeding. Saves the LHS evaluations to file
    for quicker model initialisation when testing.
    '''
    # Create a 2D optimal LHS over [0,1]^n
    sp = samplingplan(2)
    X = sp.optimallhc(30)
    # Stretch the LHS over [-1,1]^n
    X = (2 * X) - 1
    # Evaluate the function at the LHS points
    y = self.testfunction(X)
    lhs_record = {}
    lhs_record['location'] = copy.deepcopy(X)
    lhs_record['value'] = copy.deepcopy(y)
    with open('init_lhs.p', 'wb') as fp:
        leek.dump(lhs_record, fp, protocol=leek.HIGHEST_PROTOCOL)
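# A matching loader sketch for the cache written above. Assumption: `leek` is this
# module's alias for the standard-library pickle (i.e. `import pickle as leek`).
import pickle as leek

def initial_load():
    # Read back the cached LHS locations and evaluations saved by initial_create
    with open('init_lhs.p', 'rb') as fp:
        lhs_record = leek.load(fp)
    return lhs_record['location'], lhs_record['value']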
from pyKriging.krige import kriging
from pyKriging.samplingplan import samplingplan
from pyKriging.testfunctions import testfunctions

dataFilePath = "../data/samples-data.data"
labelsFilePath = "../data/samples-data-labels.data"


def getTrainData():
    """Fetch the training data."""
    # `io` here refers to a project-local data helper, not the standard-library io
    X = io.getData(dataFilePath)
    Y = io.getData(labelsFilePath)
    return X, Y


# The Kriging model starts by defining a sampling plan; we use an optimal Latin Hypercube here
sp = samplingplan(3)
X = sp.optimallhc(30)
print("[DEBUG] X: {}".format(X))

# Next, we define the problem we would like to solve
testfun = testfunctions().squared
y = testfun(X)

# Now that we have our initial data, we can create an instance of a kriging model
k = kriging(X, y, testfunction=testfun, testPoints=300)

# The model is then trained
k.train()
k.snapshot()
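# Hypothetical follow-on (not in the original script): train a second model from the
# stored sample files instead of a fresh LHC, assuming the files hold matching X/Y arrays.
X_data, Y_data = getTrainData()
k2 = kriging(X_data, Y_data, testfunction=testfun, testPoints=300)
k2.train()
k2.snapshot()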
#!/usr/bin/env python
import pyKriging
from pyKriging.krige import kriging
from pyKriging.samplingplan import samplingplan

# The Kriging model starts by defining a sampling plan, we use an optimal Latin Hypercube here
sp = samplingplan(2)
X = sp.optimallhc(20)

# Next, we define the problem we would like to solve
testfun = pyKriging.testfunctions().branin
y = testfun(X)

# Now that we have our initial data, we can create an instance of a Kriging model
k = kriging(X, y, testfunction=testfun, name='simple')
k.train()

# Now, five infill points are added. Note that the model is re-trained after each point is added
numiter = 5
for i in range(numiter):
    print('Infill iteration {0} of {1}....'.format(i + 1, numiter))
    newpoints = k.infill(1)
    for point in newpoints:
        k.addPoint(point, testfun(point)[0])
    k.train()

# And plot the results
k.plot()
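# A small validation sketch for the trained model above (an addition, not part of the
# original example): compare k.predict against the true test function at random points
# inside the unit-square sampling domain.
import numpy as np

check = np.random.rand(10, 2)
errors = [abs(k.predict(list(p)) - testfun([p])[0]) for p in check]
print('Mean absolute error over 10 random points: {0}'.format(np.mean(errors)))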
import pyKriging
from pyKriging.krige import kriging
from pyKriging.samplingplan import samplingplan
from pyKriging.testfunctions import testfunctions

# The Kriging model starts by defining a sampling plan, we use an optimal Latin Hypercube here
sp = samplingplan(3)
X = sp.optimallhc(30)

# Next, we define the problem we would like to solve
testfun = testfunctions().squared
y = testfun(X)

# Now that we have our initial data, we can create an instance of a kriging model
k = kriging(X, y, testfunction=testfun, testPoints=300)

# The model is then trained
k.train()
k.snapshot()

# It's typically beneficial to add additional points based on the results of the initial training.
# The infill method can be used for this.
# In this example, we add 100 points in ten batches of ten. The model gets trained after each stage
for i in range(10):
    print(k.history['rsquared'][-1])
    print('Infill iteration {0}'.format(i + 1))
    infillPoints = k.infill(10)
    # Evaluate the infill points and add them back to the Kriging model
    for point in infillPoints:
        k.addPoint(point, testfun(point)[0])
    k.train()
    k.snapshot()
from pyKriging.samplingplan import samplingplan
from pyKriging.utilities import saveModel
# import mayavi.mlab as mlab

# Common to pySW
from pySW import SW
import psutil, time, shutil
import pandas as pd
from pyDOE import *
from doepy import build

k = 13
n = 20
start_time = time.time()
sp = samplingplan(k)
X = sp.optimallhc(n)
pd.set_option('display.max_columns', None)

partName = r'4pipes_i2.SLDPRT'
# Start SolidWorks if it is not already running
if "SLDWORKS.exe" not in (p.name() for p in psutil.process_iter()):
    print('starting SLDWORKS')
    SW.startSW()
    time.sleep(10)
SW.connectToSW()
SW.openPrt(psutil.os.getcwd() + '\\' + partName)
__author__ = 'cpaulson'
import pyKriging
from pyKriging.krige import kriging
from pyKriging.samplingplan import samplingplan
from pyKriging.CrossValidation import Cross_Validation
from pyKriging.utilities import saveModel

# The Kriging model starts by defining a sampling plan, we use an optimal Latin Hypercube here
sp = samplingplan(2)
X = sp.optimallhc(5)

# Next, we define the problem we would like to solve
testfun = pyKriging.testfunctions().branin

# We generate our observed values based on our sampling plan and the test function
y = testfun(X)

print('Setting up the Kriging Model')
cvMSE = []

# Now that we have our initial data, we can create an instance of a kriging model
k = kriging(X, y, testfunction=testfun, name='simple', testPoints=300)
k.train(optimizer='ga')
k.snapshot()
# cv = Cross_Validation(k)
# cvMSE.append( cv.leave_n_out(q=5)[0] )
k.plot()

for i in range(15):
    print(i)
    newpoints = k.infill(1)
    for point in newpoints:
        k.addPoint(point, testfun([point])[0])
    k.train()
from pyKriging.samplingplan import samplingplan
from pathlib import Path
from pyDOE import *
import numpy as np
#
# ********************************************************************************
# LHC inputs
# ********************************************************************************
#
# Design parameters and bounds (ar, w, t, l)
params_base = np.array([1.0, 0.00015, 0.00015, 0.00100])
params_lb = np.array([1.0, 0.00010, 0.00010, 0.00090])
params_ub = np.array([2.3, 0.00020, 0.00020, 0.00120])
#
# LHC sampling plan
params_no = len(params_base)
sp = samplingplan(params_no)
lhc_us = sp.optimallhc(40)
lhc_ps = np.empty((0, params_no))
for i in range(len(lhc_us)):
    # Scale each unit-hypercube sample into the design bounds
    ar = (params_ub[0] - params_lb[0]) * lhc_us[i, 0] + params_lb[0]
    w = (params_ub[1] - params_lb[1]) * lhc_us[i, 1] + params_lb[1]
    t = (params_ub[2] - params_lb[2]) * lhc_us[i, 2] + params_lb[2]
    l = (params_ub[3] - params_lb[3]) * lhc_us[i, 3] + params_lb[3]
    params_group = [ar, w, t, l]
    lhc_ps = np.append(lhc_ps, [params_group], axis=0)
design_matrix = lhc_ps
print(design_matrix)
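# An equivalent vectorized form of the scaling loop above (an added sketch, using the
# same arrays): broadcasting the bounds over the whole unit-hypercube sample yields
# the same design matrix in one expression.
design_matrix_check = params_lb + (params_ub - params_lb) * lhc_us
#
# ********************************************************************************
# Optimisation pre-processing and function definitions
# ********************************************************************************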
def lhc_sample(self, nexus, dimension=['', -1]):
    """
    Creates and scales the sample based on the input upper and lower bounds.
    """
    fid = dimension[0]
    size = dimension[1]
    if size == -1:
        size = self.sample_plan.size
    inputs = nexus.optimization_problem.inputs
    names = inputs[:, 0]   # names
    bounds = inputs[:, 2]  # bounds [l,u]
    scale = inputs[:, 3]   # scaling
    units = inputs[:, -1] * 1.0
    inputs[:, -1] = units
    num_var = np.shape(names)[0]

    # Collect upper and lower bounds for each variable
    for i in range(0, num_var):
        if i == 0:
            ub = [bounds[i][1]]
            lb = [bounds[i][0]]
        else:
            ub.append(bounds[i][1])
            lb.append(bounds[i][0])
    # This should always hold, but in case the bounds are already ndarrays:
    if not isinstance(ub, np.ndarray):
        ub = np.array(ub)
        lb = np.array(lb)
    # print('did lb ub')

    # Make sample - latin hypercube: random (fast), optimal (slow)
    sampleplan = samplingplan(num_var)

    # Get lhc sample size
    # ==== FOR GENERAL RUN, SINGLE FID
    if fid == '':
        size = self.sample_plan.size
        if size in (0, -1, None):  # choose defaults
            print('LHC gen: defaults chosen')
            if num_var <= 10:
                sample_size = 10
            else:
                sample_size = 2 * num_var
        else:  # controlled user choice
            print('LHC gen: user choice')
            if size < num_var:
                sample_size = 2 * num_var  # prevent silly choices
                print('sample size too small, using minimum (number of variables or 10)')
            else:
                sample_size = size  # - num_var **2
    # ==== FOR LOW FIDELITY OF RUN
    elif fid == 'low':
        if size in (0, -1):  # choose defaults
            print('LHC gen: defaults chosen')
            if num_var <= 10:
                sample_size = 10
            else:
                sample_size = 10 * num_var
        else:  # controlled user choice
            print('LHC gen: user choice')
            if size < 10 * num_var:
                sample_size = 10 * num_var  # prevent silly choices
                print('sample size too small, using minimum (number of variables or 10)')
            elif size > 100 - num_var**2:
                sample_size = 100 - num_var**2
            else:
                sample_size = size
    # ==== FOR HIGH FIDELITY OF RUN
    elif fid == 'high':
        if size in (0, -1):  # choose defaults
            print('LHC gen: defaults chosen')
            sample_size = 4 * num_var
        else:  # controlled user choice
            print('LHC gen: user choice')
            if size < 4 * num_var:
                sample_size = 4 * num_var  # prevent silly choices
                print('sample size too small, using minimum (number of variables or 10)')
            elif size > 30:
                sample_size = 30
            else:
                sample_size = size
    print(sample_size)

    # Check if optimal or random
    if self.sample_plan.lhc_type.lower() in ['olh', 'optimal', 'opt', 'best', 'sehr gut', 'o'] and size <= 50:
        print('Optimal Latin Hypercube Sample')
        sample = sampleplan.optimallhc(sample_size, population=20, iterations=20, generation=False)
    elif self.sample_plan.lhc_type in ['rlh', 'rand', 'random', 'r', 'Rando Calrissian'] or size > 50:
        print('Random Latin Hypercube Sample')
        sample = sampleplan.rlh(sample_size)  # -- !!! worth checking how much time impact this actually has
    else:
        use_your_words = ('Incorrect designation for latin hypercube sample type.\n'
                          'Please choose from:\n'
                          '  Optimal, eg: \'olh\', \'optimal\', \'o\'\n'
                          '  Random, eg: \'rlh\', \'rand\', \'random\', \'r\'')
        raise Exception(use_your_words)  # make them do it again

    # Scale lhc sample to inputs (note: this reuses the name `scale` from above)
    scale = ub - lb  # find range
    scaled_inputs = np.multiply(scale, sample) + lb  # correct for lb

    # SAVE
    # self.sample_plan.lhc = scaled_inputs
    return scaled_inputs