def discretize1d_linear(self):
    """Discretize the product of two 1d linear sparse grid functions of
    different level and verify the result against the exact product."""
    grid1, alpha1 = self.interpolate(1, 3, 2)
    grid2, alpha2 = self.interpolate(1, 4, 6)
    jgrid, jalpha = discretizeProduct(grid1, alpha1, grid2, alpha2)

    # reference values on a uniform 1d sample of the unit interval
    num_samples = 200
    xs = np.linspace(0, 1, num_samples)

    def eval1d(g, a, xi):
        # evaluate a 1d sparse grid function at a single point
        return evalSGFunction(g, a, np.array([xi]))

    y1 = [self.f([xi])**2 for xi in xs]
    y2 = np.array([eval1d(grid1, alpha1, xi) * eval1d(grid2, alpha2, xi)
                   for xi in xs])
    y3 = np.array([eval1d(jgrid, jalpha, xi) for xi in xs])

    # the discretized product must reproduce the pointwise product exactly
    assert np.sum(abs(y3 - y2)) < 1e-13

    plt.plot(xs, y1, label="solution")
    plt.plot(xs, y2, label="product")
    plt.plot(xs, y3, label="poly")
    plt.title(
        "2 linear grids different level (maxlevel=%i, deg=%i), err = %g" %
        (jgrid.getStorage().getMaxLevel(), getDegree(jgrid),
         np.max(abs(y3 - y2))))
    plt.legend()
    plt.show()
def update(self, grid, v, gpi, params, *args, **kws): """ Compute ranking for variance estimation \argmax_{i \in \A} |v_i| \sqrt{E[\varphi_i^2]} @param grid: Grid grid @param v: numpy array coefficients """ # get grid point associated to ix gs = grid.getStorage() p = DataVector(gs.getDimension()) gs.getCoordinates(gpi, p) # get joint distribution ap = params.activeParams() U = ap.getIndependentJointDistribution() T = ap.getJointTransformation() q = T.unitToProbabilistic(p.array()) # scale surplus by probability density ix = gs.getSequenceNumber(gpi) fx = U.pdf(q) ux = evalSGFunction(grid, v, p.array()) # update the ranking return np.abs((fx**2 - fx) * v[ix] * (2 * ux - v[ix]))
def plotResultsSG(self, grid, alpha, level, maxGridSize, refinement,
                  iteration, out, isFull=False):
    """
    Plot the sparse grid surrogate and its pointwise error w.r.t. the
    simulation; optionally save both figures as pdf.

    @param grid: sparse grid
    @param alpha: surplus coefficients
    @param level: regular level of the grid (filename only)
    @param maxGridSize: maximum allowed grid size (filename only)
    @param refinement: refinement criterion identifier (filename only)
    @param iteration: refinement iteration (filename only)
    @param out: if True, save the figures instead of showing them
    @param isFull: if True, label the output files as full-grid results.
                   Fix: this name was referenced in the filename patterns
                   but never defined, raising NameError when out=True; it
                   is now a backward-compatible keyword parameter.
    """
    fig, ax, _ = plotSG3d(grid, alpha)
    ax.set_title("eval")
    if out:
        filename = os.path.join(
            self.pathResults,
            "%s_%s_d%i_%s_l%i_Nmax%i_N%i_r%s_it%i.pdf" %
            (self.radix, "sg" if not isFull else "fg", self.numDims,
             grid.getTypeAsString(), level, maxGridSize, grid.getSize(),
             refinement, iteration))
        plt.savefig(filename)

    # plot the pointwise error in the probabilistic space
    trans = self.params.getJointTransformation()
    fig, ax, _ = plotError3d(
        lambda x: self.simulation(x),
        lambda x: evalSGFunction(grid, alpha, trans.probabilisticToUnit(x)),
        xlim=[-2, 1], ylim=[0, 1])
    ax.set_title("error")
    if out:
        filename = os.path.join(
            self.pathResults,
            "%s_error_%s_d%i_%s_l%i_Nmax%i_N%i_r%s_it%i.pdf" %
            (self.radix, "sg" if not isFull else "fg", self.numDims,
             grid.getTypeAsString(), level, maxGridSize, grid.getSize(),
             refinement, iteration))
        plt.savefig(filename)

    if not out:
        plt.show()
def makeAddedNodalValuesPositive(self, grid, alpha, addedGridPoints,
                                 tol=-1e-14):
    """
    Lift the surpluses of newly added grid points so that the sparse grid
    function is non-negative (up to tol) at those points.

    Fix: removed the unused local `neg = []` accumulator.

    @param grid: sparse grid
    @param alpha: surplus vector, modified in place
    @param addedGridPoints: iterable of grid points added in the last
           refinement step
    @param tol: negative tolerance below which a nodal value is corrected
    @return: the corrected surplus vector alpha
    """
    gs = grid.getStorage()
    x = DataVector(gs.getDimension())
    for gp in addedGridPoints:
        gp.getStandardCoordinates(x)
        yi = evalSGFunction(grid, alpha, x.array())
        if yi < tol:
            # shift the surplus so the nodal value at gp becomes zero;
            # for the hierarchical basis this does not affect the nodal
            # values of the points already present
            i = gs.getSequenceNumber(gp)
            alpha[i] -= yi
            assert alpha[i] > -1e-14
            assert evalSGFunction(grid, alpha, x.array()) < 1e-14
    return alpha
def discretize1d_identity(self):
    """Discretize the product of a 1d sparse grid function with itself
    and verify it against the squared interpolant."""
    grid, alpha = self.interpolate(1, 3, 4)
    jgrid, jalpha = discretizeProduct(grid, alpha, grid, alpha)

    # reference values on a uniform sample of the unit interval
    num_points = 200
    xs = np.linspace(0, 1, num_points)
    y1 = np.array([self.f([xi])**2 for xi in xs])
    y2 = np.array([evalSGFunction(grid, alpha, np.array([xi]))**2
                   for xi in xs])
    y3 = np.array([evalSGFunction(jgrid, jalpha, np.array([xi]))
                   for xi in xs])

    # the product grid must reproduce the squared interpolant exactly
    assert np.sum(abs(y3 - y2)) < 1e-13

    plt.plot(xs, y1, label="solution")
    plt.plot(xs, y2, label="product")
    plotSG1d(jgrid, jalpha, n=num_points, label="poly")
    plt.title("1 linear grid same level (maxlevel=%i, deg=%i), err = %g" %
              (jgrid.getStorage().getMaxLevel(), getDegree(jgrid),
               np.sum(abs(y3 - y2))))
    plt.legend()
    plt.show()
def run_adaptive_sparse_grid(self, gridType, level, maxGridSize, refinement,
                             boundaryLevel=None, isFull=False, out=False,
                             plot=False):
    """
    Run an adaptively refined sparse grid study and collect per-iteration
    error statistics (l1/l2/max test error, mean/variance errors, Sobol
    indices and total effects).

    Fix: results are now pickled through a binary file handle opened with
    a context manager; pickle.dump fails on text-mode files in Python 3.

    @param gridType: sparse grid type
    @param level: initial regular level
    @param maxGridSize: upper bound on the grid size
    @param refinement: refinement criterion identifier
    @param boundaryLevel: boundary level of the grid (None = no boundary)
    @param isFull: if True, label results as full-grid
    @param out: if True, dump the statistics to a pkl file
    @param plot: if True, plot the surrogate per iteration
    """
    test_samples, test_values = self.getTestSamples()
    # ----------------------------------------------------------
    # define the learner
    # ----------------------------------------------------------
    uqManager = TestEnvironmentSG().buildSetting(self.params,
                                                 self.simulation,
                                                 level,
                                                 gridType,
                                                 deg=20,
                                                 maxGridSize=maxGridSize,
                                                 isFull=isFull,
                                                 adaptive=refinement,
                                                 adaptPoints=10,
                                                 adaptRate=0.05,
                                                 epsilon=1e-10,
                                                 boundaryLevel=boundaryLevel)
    # ----------------------------------------------
    # first run
    while uqManager.hasMoreSamples():
        uqManager.runNextSamples()

    # ----------------------------------------------------------
    # specify ASGC estimator
    # ----------------------------------------------------------
    analysis = ASGCAnalysisBuilder().withUQManager(uqManager)\
                                    .withAnalyticEstimationStrategy()\
                                    .andGetResult()
    analysis.setVerbose(False)
    # ----------------------------------------------------------
    # expectation values and variances
    sg_mean, sg_var = analysis.mean(), analysis.var()

    stats = {}
    iterations = uqManager.getKnowledge().getAvailableIterations()
    for k, iteration in enumerate(iterations):
        # ----------------------------------------------------------
        # estimated anova decomposition
        anova = analysis.getAnovaDecomposition(iteration=iteration,
                                               nk=len(self.params))
        # estimate the l2 error
        grid, alpha = uqManager.getKnowledge().getSparseGridFunction(
            iteration=iteration)
        test_values_pred = evalSGFunction(grid, alpha, test_samples)
        l2test, l1test, maxErrorTest, meanError, varError = \
            self.getErrors(test_values, test_values_pred,
                           sg_mean[iteration][0], sg_var[iteration][0])
        # ----------------------------------------------------------
        # main effects
        sobol_indices = anova.getSobolIndices()
        total_effects = computeTotalEffects(sobol_indices)

        print("-" * 60)
        print("iteration=%i, N=%i" % (iteration, grid.getSize()))
        print("E[x] = %g ~ %g (err=%g)" %
              (self.E_ana[0], sg_mean[iteration]["value"],
               np.abs(self.E_ana[0] - sg_mean[iteration]["value"])))
        print("V[x] = %g ~ %g (err=%g)" %
              (self.V_ana[0], sg_var[iteration]["value"],
               np.abs(self.V_ana[0] - sg_var[iteration]["value"])))

        stats[grid.getSize()] = {'num_model_evaluations': grid.getSize(),
                                 'l2test': l2test,
                                 'l1test': l1test,
                                 'maxErrorTest': maxErrorTest,
                                 'mean_error': meanError,
                                 'var_error': varError,
                                 'mean_estimated': sg_mean[iteration]["value"],
                                 'var_estimated': sg_var[iteration]["value"],
                                 'sobol_indices_estimated': sobol_indices,
                                 'total_effects_estimated': total_effects}

        if plot:
            self.plotResultsSG(grid, alpha, level, maxGridSize, refinement,
                               iteration, out)

    if out:
        # store results
        filename = os.path.join(self.pathResults,
                                "%s_%s_d%i_%s_l%i_Nmax%i_r%s_N%i.pkl" %
                                (self.radix,
                                 "sg" if not isFull else "fg",
                                 self.numDims,
                                 grid.getTypeAsString(),
                                 level,
                                 maxGridSize,
                                 refinement,
                                 grid.getSize()))
        # pickle requires a binary file handle in Python 3
        with open(filename, "wb") as fd:
            pkl.dump({'surrogate': 'sg',
                      'model': "full" if self.numDims == 4 else "reduced",
                      'num_dims': self.numDims,
                      'grid_type': grid.getTypeAsString(),
                      'level': level,
                      'max_grid_size': maxGridSize,
                      'is_full': isFull,
                      'refinement': refinement,
                      'mean_analytic': self.E_ana[0],
                      'var_analytic': self.V_ana[0],
                      'results': stats}, fd)
def run_regular_sparse_grid(self, gridType, level, maxGridSize,
                            boundaryLevel=1, isFull=False, out=False,
                            plot=False):
    """
    Run a regular sparse grid convergence study: increase the level until
    the grid size exceeds maxGridSize and collect per-level statistics.

    Fix: results are now pickled through a binary file handle opened with
    a context manager; pickle.dump fails on text-mode files in Python 3.

    @param gridType: sparse grid type
    @param level: initial regular level (incremented per outer iteration)
    @param maxGridSize: stop once the sampler would exceed this size
    @param boundaryLevel: boundary level of the grid
    @param isFull: if True, label results as full-grid
    @param out: if True, dump the statistics to a pkl file
    @param plot: if True, plot the surrogate per level
    """
    np.random.seed(1234567)
    test_samples, test_values = self.getTestSamples()
    stats = {}
    while True:
        print("-" * 80)
        print("level = %i" % level)
        uqManager = TestEnvironmentSG().buildSetting(self.params,
                                                     self.simulation,
                                                     level,
                                                     gridType,
                                                     deg=20,
                                                     maxGridSize=maxGridSize,
                                                     isFull=isFull,
                                                     boundaryLevel=boundaryLevel)
        if uqManager.sampler.getSize() > maxGridSize:
            print("DONE: %i > %i" % (uqManager.sampler.getSize(),
                                     maxGridSize))
            break

        # ----------------------------------------------
        # first run
        while uqManager.hasMoreSamples():
            uqManager.runNextSamples()

        # ----------------------------------------------------------
        # specify ASGC estimator
        analysis = ASGCAnalysisBuilder().withUQManager(uqManager)\
                                        .withAnalyticEstimationStrategy()\
                                        .andGetResult()
        analysis.setVerbose(False)
        # ----------------------------------------------------------
        # expectation values and variances
        sg_mean, sg_var = analysis.mean(), analysis.var()
        # ----------------------------------------------------------
        # estimate the l2 error
        grid, alpha = uqManager.getKnowledge().getSparseGridFunction()
        test_values_pred = evalSGFunction(grid, alpha, test_samples)
        l2test, l1test, maxErrorTest, meanError, varError = \
            self.getErrors(test_values, test_values_pred,
                           sg_mean["value"], sg_var["value"])
        print("-" * 60)
        print("test: |.|_2 = %g" % l2test)
        print("E[x] = %g ~ %g (err=%g)" %
              (self.E_ana[0], sg_mean["value"],
               np.abs(self.E_ana[0] - sg_mean["value"])))
        print("V[x] = %g ~ %g (err=%g)" %
              (self.V_ana[0], sg_var["value"],
               np.abs(self.V_ana[0] - sg_var["value"])))
        # ----------------------------------------------------------
        # estimated anova decomposition (not available for sgde input)
        if self.inputSpace != "sgde":
            anova = analysis.getAnovaDecomposition(nk=len(self.params))
            sobol_indices = anova.getSobolIndices()
            total_effects = computeTotalEffects(sobol_indices)
        else:
            sobol_indices = {}
            total_effects = {}
        # ----------------------------------------------------------
        stats[level] = {'num_model_evaluations': grid.getSize(),
                        'l2test': l2test,
                        'l1test': l1test,
                        'maxErrorTest': maxErrorTest,
                        'mean_error': meanError,
                        'var_error': varError,
                        'mean_estimated': sg_mean["value"],
                        'var_estimated': sg_var["value"],
                        'sobol_indices_estimated': sobol_indices,
                        'total_effects_estimated': total_effects}
        if plot:
            self.plotResultsSG(grid, alpha, level, maxGridSize, False, 0,
                               out)
        level += 1

    if out:
        # store results
        filename = os.path.join(self.pathResults,
                                "%s_%s_d%i_%s_Nmax%i_r%i_N%i.pkl" %
                                (self.radix,
                                 "sg" if not isFull else "fg",
                                 self.numDims,
                                 grid.getTypeAsString(),
                                 maxGridSize,
                                 False,
                                 grid.getSize()))
        # pickle requires a binary file handle in Python 3
        with open(filename, "wb") as fd:
            pkl.dump({'surrogate': 'sg',
                      'num_dims': self.numDims,
                      'grid_type': grid.getTypeAsString(),
                      'max_grid_size': maxGridSize,
                      'level': level,
                      'boundaryLevel': boundaryLevel,
                      'is_full': isFull,
                      'refinement': False,
                      'mean_analytic': self.E_ana[0],
                      'var_analytic': self.V_ana[0],
                      'results': stats}, fd)
def tesst_squared(self):
    # NOTE(review): "tesst" looks like a deliberate typo to disable this
    # test for the runner; renaming would change the public interface,
    # so it is kept as-is. Verify the intent before renaming.
    """
    Interpolate f(x)^2 and its density-weighted variant on a regular
    linear grid, then compare refinement rankings (squared surplus vs.
    anchored mean squared) for a randomly chosen grid point.
    """
    # parameters
    level = 3
    gridConfig = RegularGridConfiguration()
    gridConfig.type_ = GridType_Linear
    gridConfig.maxDegree_ = 2  # max(2, level + 1)
    gridConfig.boundaryLevel_ = 0
    gridConfig.dim_ = 2

    def f(x):
        # separable test function, vanishes on the boundary
        return np.prod(8 * x * (1 - x))

    # --------------------------------------------------------------------------
    # define parameters
    paramsBuilder = ParameterBuilder()
    up = paramsBuilder.defineUncertainParameters()
    for idim in range(gridConfig.dim_):
        up.new().isCalled("x_%i" % idim).withUniformDistribution(0, 1)
    params = paramsBuilder.andGetResult()
    U = params.getIndependentJointDistribution()
    T = params.getJointTransformation()
    # --------------------------------------------------------------------------
    # build a regular sparse grid and collect the nodal values of f^2
    # and of f^2 weighted by the joint density
    grid = pysgpp.Grid.createGrid(gridConfig)
    gs = grid.getStorage()
    grid.getGenerator().regular(level)
    nodalValues = np.ndarray(gs.getSize())
    weightedNodalValues = np.ndarray(gs.getSize())
    p = DataVector(gs.getDimension())
    for i in range(gs.getSize()):
        gp = gs.getCoordinates(gs.getPoint(i), p)
        nodalValues[i] = f(p.array())**2
        weightedNodalValues[i] = f(p.array())**2 * U.pdf(
            T.unitToProbabilistic(p))
    # --------------------------------------------------------------------------
    # hierarchize the plain nodal values and check interpolation
    alpha_vec = pysgpp.DataVector(nodalValues)
    pysgpp.createOperationHierarchisation(grid).doHierarchisation(
        alpha_vec)
    alpha = alpha_vec.array()
    checkInterpolation(grid, alpha, nodalValues, epsilon=1e-13)
    # --------------------------------------------------------------------------
    # hierarchize the density-weighted nodal values and check interpolation
    alpha_vec = pysgpp.DataVector(weightedNodalValues)
    pysgpp.createOperationHierarchisation(grid).doHierarchisation(
        alpha_vec)
    weightedAlpha = alpha_vec.array()
    checkInterpolation(grid, weightedAlpha, weightedNodalValues,
                       epsilon=1e-13)
    # --------------------------------------------------------------------------
    # pick a random grid point and compare direct evaluation with the
    # parent-based evaluation
    # np.random.seed(1234567)
    i = np.random.randint(0, gs.getSize())
    gpi = gs.getPoint(i)
    gs.getCoordinates(gpi, p)
    print(evalSGFunction(grid, alpha, p.array()))
    print(evalSGFunctionBasedOnParents(grid, alpha, gpi))
    # --------------------------------------------------------------------------
    # check refinement criterion
    ranking = SquaredSurplusRanking()
    squared_surplus_rank = ranking.rank(grid, gpi, weightedAlpha, params)
    if self.verbose:
        print("rank squared surplus: %g" % (squared_surplus_rank, ))
    # --------------------------------------------------------------------------
    # check refinement criterion
    ranking = AnchoredMeanSquaredOptRanking()
    anchored_mean_squared_rank = ranking.rank(grid, gpi, alpha, params)
    if self.verbose:
        print("rank mean squared : %g" % (anchored_mean_squared_rank, ))
def test_anchored_variance_opt(self):
    """
    Compare the AnchoredVarianceOptRanking value of a random grid point
    against a manually computed estimate of the same quantity.
    """
    # parameters
    level = 4
    gridConfig = RegularGridConfiguration()
    gridConfig.type_ = GridType_Linear
    gridConfig.maxDegree_ = 2  # max(2, level + 1)
    gridConfig.boundaryLevel_ = 0
    gridConfig.dim_ = 2

    # mu = np.ones(gridConfig.dim_) * 0.5
    # cov = np.diag(np.ones(gridConfig.dim_) * 0.1 / 10.)
    # dist = MultivariateNormal(mu, cov, 0, 1)  # problems in 3d/l2
    # f = lambda x: dist.pdf(x)
    # NOTE(review): the first f below is immediately shadowed by the
    # second definition and is dead code; kept for experimentation.
    def f(x):
        return np.prod(4 * x * (1 - x))

    def f(x):
        # anisotropic, non-separable test function
        return np.arctan(
            50 * (x[0] - .35)) + np.pi / 2 + 4 * x[1]**3 + np.exp(
                x[0] * x[1] - 1)

    # --------------------------------------------------------------------------
    # define parameters
    paramsBuilder = ParameterBuilder()
    up = paramsBuilder.defineUncertainParameters()
    for idim in range(gridConfig.dim_):
        up.new().isCalled("x_%i" % idim).withBetaDistribution(3, 3, 0, 1)
    params = paramsBuilder.andGetResult()
    U = params.getIndependentJointDistribution()
    T = params.getJointTransformation()
    # --------------------------------------------------------------------------
    # interpolate f on a regular sparse grid
    grid = pysgpp.Grid.createGrid(gridConfig)
    gs = grid.getStorage()
    grid.getGenerator().regular(level)
    nodalValues = np.ndarray(gs.getSize())
    p = DataVector(gs.getDimension())
    for i in range(gs.getSize()):
        gp = gs.getCoordinates(gs.getPoint(i), p)
        nodalValues[i] = f(p.array())
    # --------------------------------------------------------------------------
    alpha_vec = pysgpp.DataVector(nodalValues)
    pysgpp.createOperationHierarchisation(grid).doHierarchisation(
        alpha_vec)
    alpha = alpha_vec.array()
    checkInterpolation(grid, alpha, nodalValues, epsilon=1e-13)
    # --------------------------------------------------------------------------
    # pick a random grid point to rank
    i = np.random.randint(0, gs.getSize())
    gpi = gs.getPoint(i)
    # --------------------------------------------------------------------------
    # check refinement criterion
    ranking = AnchoredVarianceOptRanking()
    var_rank = ranking.rank(grid, gpi, alpha, params)
    if self.verbose:
        print("rank anchored var: %g" % (var_rank, ))
    # --------------------------------------------------------------------------
    # compute the mean and the variance of the new grid
    x = DataVector(gs.getDimension())
    gs.getCoordinates(gpi, x)
    x = x.array()
    # uwxi: interpolant at x without the contribution of gpi's surplus
    uwxi = evalSGFunction(grid, alpha, x) - alpha[i]
    fx = U.pdf(T.unitToProbabilistic(x))
    var_rank_estimated = np.abs(
        (fx - fx**2) * (-alpha[i]**2 - 2 * alpha[i] * uwxi))
    if self.verbose:
        print("rank anchored var: %g" % (var_rank_estimated, ))
    if self.verbose:
        print("-" * 80)
        print("diff: |var - var_estimated| = %g" %
              (np.abs(var_rank - var_rank_estimated), ))
def run_sparse_grids(self, gridType, level, maxGridSize, isFull,
                     refinement=None, out=False):
    """
    Run a (possibly adaptive) sparse grid study and collect per-iteration
    l2 test errors, variance estimates and Sobol indices.

    Fixes:
    - the l2 error was computed as sqrt(mean(residuals)**2), i.e. the
      absolute mean residual, which cancels errors of opposite sign; it
      is now the root mean squared error sqrt(mean(residuals**2)).
    - results are pickled through a binary file handle; pickle.dump
      fails on text-mode files in Python 3.

    @param gridType: sparse grid type
    @param level: regular level
    @param maxGridSize: upper bound on the grid size
    @param isFull: if True, label results as full-grid
    @param refinement: refinement criterion identifier (None = regular)
    @param out: if True, dump the statistics to a pkl file
    @return: tuple (sobol_indices of the last iteration, grid size)
    """
    # ----------------------------------------------------------
    # define the learner
    # ----------------------------------------------------------
    uqManager = TestEnvironmentSG().buildSetting(self.params,
                                                 self.simulation,
                                                 level,
                                                 gridType,
                                                 deg=10,
                                                 maxGridSize=maxGridSize,
                                                 isFull=isFull,
                                                 adaptive=refinement,
                                                 adaptPoints=3,
                                                 epsilon=1e-3)
    # ----------------------------------------------
    # first run
    while uqManager.hasMoreSamples():
        uqManager.runNextSamples()
    # ----------------------------------------------------------
    # specify ASGC estimator
    # ----------------------------------------------------------
    analysis = ASGCAnalysisBuilder().withUQManager(uqManager)\
                                    .withAnalyticEstimationStrategy()\
                                    .andGetResult()
    # ----------------------------------------------------------
    # expectation values and variances
    sg_mean, sg_var = analysis.mean(), analysis.var()
    print("-" * 60)
    print("V[x] = %g ~ %s" % (self.var, sg_var))

    iterations = uqManager.getKnowledge().getAvailableIterations()
    stats = [None] * len(iterations)
    for k, iteration in enumerate(iterations):
        # ----------------------------------------------------------
        # estimated anova decomposition
        anova = analysis.getAnovaDecomposition(iteration=iteration,
                                               nk=len(self.params))
        # estimate the l2 error on a fresh Monte Carlo test set
        test_samples = np.random.random((1000, self.effectiveDims))
        test_values = np.ndarray(1000)
        for i, sample in enumerate(test_samples):
            test_values[i] = self.simulation(sample)
        grid, alpha = uqManager.getKnowledge().getSparseGridFunction()
        test_values_pred = evalSGFunction(grid, alpha, test_samples)
        # root mean squared error (fix: square the residuals, not the mean)
        l2test = np.sqrt(np.mean((test_values - test_values_pred)**2))
        # ----------------------------------------------------------
        # main effects
        sobol_indices = anova.getSobolIndices()
        total_effects = computeTotalEffects(sobol_indices)

        stats[k] = {'num_model_evaluations': grid.getSize(),
                    'l2test': l2test,
                    'var_estimated': sg_var[0],
                    'var_analytic': self.var,
                    'sobol_indices_estimated': sobol_indices,
                    'total_effects_estimated': total_effects}
    if out:
        # store results
        filename = os.path.join(
            "results",
            "%s_%s_d%i_%s_l%i_Nmax%i_%s_N%i.pkl" %
            (self.radix, "sg" if not isFull else "fg", self.effectiveDims,
             grid.getTypeAsString(), level, maxGridSize, refinement,
             grid.getSize()))
        # pickle requires a binary file handle in Python 3
        with open(filename, "wb") as fd:
            pkl.dump(
                {'surrogate': 'sg',
                 'model': "full" if self.effectiveDims == 4 else "reduced",
                 'num_dims': self.effectiveDims,
                 'grid_type': grid.getTypeAsString(),
                 'level': level,
                 'max_grid_size': maxGridSize,
                 'is_full': isFull,
                 'refinement': refinement,
                 'sobol_indices_analytic': self.sobol_indices,
                 'total_effects_analytic': self.total_effects,
                 'results': stats}, fd)
    return sobol_indices, grid.getSize()
# interpolate the joint density U.pdf on the sparse grid
nodalValues = np.ndarray(gs.getSize())
point = DataVector(2)
for ix in range(gs.getSize()):
    gs.getCoordinates(gs.getPoint(ix), point)
    nodalValues[ix] = U.pdf(point.array())
alpha = hierarchize(grid, nodalValues)

# visual comparison: true density vs. its sparse grid interpolant
fig, _, _ = plotFunction3d(U.pdf)
fig.show()
fig, _, _ = plotSG3d(grid, alpha)
fig.show()

# find 1d cut at x1 = 0.75
x2 = 0.75
x1s = np.linspace(0, 1, 200)
y = np.array([evalSGFunction(grid, alpha, np.array([x1, x2]))
              for x1 in x1s])
fig = plt.figure()
plt.plot(x1s, y)
plt.vlines([0.125, 0.25, 0.375, 0.625, 0.75, 0.875], -40, 1)
fig.show()
plt.show()
def f(x):
    """Pointwise product of the two sparse grid interpolants at x."""
    left = evalSGFunction(grid1, alpha1, x)
    right = evalSGFunction(grid2, alpha2, x)
    return left * right
def run_adaptive_sparse_grid(self, gridTypeStr, level, maxGridSize,
                             boundaryLevel=1, refinement="l2", out=False):
    """
    Run an adaptively refined sparse grid study with Monte Carlo moment
    estimation and collect per-iteration test errors.

    Fix: results are now pickled through a binary file handle opened with
    a context manager; pickle.dump fails on text-mode files in Python 3.

    @param gridTypeStr: sparse grid type as string
    @param level: initial regular level
    @param maxGridSize: upper bound on the grid size
    @param boundaryLevel: boundary level (capped at level)
    @param refinement: refinement criterion identifier
    @param out: if True, dump the statistics to a pkl file
    """
    np.random.seed(1234567)
    test_samples, test_values = self.getTestSamples()
    gridType = Grid.stringToGridType(gridTypeStr)
    print("-" * 80)
    print("level = %i, boundary level = %i" % (level, boundaryLevel))
    print("-" * 80)
    uqManager = TestEnvironmentSG().buildSetting(
        self.params,
        self.simulation,
        level,
        gridType,
        deg=20,
        maxGridSize=maxGridSize,
        adaptive=refinement,
        adaptRate=0.1,
        adaptPoints=20,
        epsilon=1e-15,
        boundaryLevel=min(level, boundaryLevel),
        knowledgeTypes=[KnowledgeTypes.SIMPLE, KnowledgeTypes.SQUARED])
    # ----------------------------------------------
    # first run
    while uqManager.hasMoreSamples():
        uqManager.runNextSamples()
    # ----------------------------------------------------------
    # specify ASGC estimator
    # ----------------------------------------------------------
    analysis = ASGCAnalysisBuilder().withUQManager(uqManager)\
                                    .withMonteCarloEstimationStrategy(n=1000, npaths=10)\
                                    .andGetResult()
    analysis.setVerbose(False)
    # ----------------------------------------------------------
    # expectation values and variances
    stats = {}
    iterations = uqManager.getKnowledge().getAvailableIterations()
    for k, iteration in enumerate(iterations):
        # ----------------------------------------------------------
        # estimate the l2 error
        grid, alpha = uqManager.getKnowledge().getSparseGridFunction(
            iteration=iteration)
        test_values_pred = evalSGFunction(grid, alpha, test_samples)
        l2test, l1test, maxErrorTest = \
            self.getErrors(test_values, test_values_pred)
        print("-" * 60)
        print("iteration=%i, N=%i" % (iteration, grid.getSize()))
        print("test: |.|_2 = %g" % l2test)
        # sg_mean, sg_var = analysis.mean(iterations=[iteration]), analysis.var(iterations=[iteration])
        # ----------------------------------------------------------
        stats[grid.getSize()] = {'num_model_evaluations': grid.getSize(),
                                 'l2test': l2test,
                                 'l1test': l1test,
                                 'maxErrorTest': maxErrorTest,
                                 # moment estimation disabled above
                                 'mean_estimated': None,  # sg_mean["value"],
                                 'var_estimated': None}  # sg_var["value"]
    if out:
        # store results
        radix = "%s_sg_d%i_%s_Nmax%i_r%s_N%i_b%i" % (
            self.radix, self.numDims, grid.getTypeAsString(),
            maxGridSize, refinement, grid.getSize(), boundaryLevel)
        if self.rosenblatt:
            radix += "_rosenblatt"
        filename = os.path.join(self.pathResults, "%s.pkl" % radix)
        # pickle requires a binary file handle in Python 3
        with open(filename, "wb") as fd:
            pkl.dump({'surrogate': 'sg',
                      'num_dims': self.numDims,
                      'grid_type': grid.getTypeAsString(),
                      'max_grid_size': maxGridSize,
                      'is_full': False,
                      'refinement': refinement,
                      'rosenblatt': self.rosenblatt,
                      'boundaryLevel': boundaryLevel,
                      'results': stats}, fd)
def run_regular_sparse_grid(self, gridTypeStr, level, maxGridSize,
                            boundaryLevel=1, out=False):
    """
    Run a regular sparse grid convergence study with Monte Carlo moment
    estimation: increase the level until the sampler would exceed
    maxGridSize and collect per-level statistics.

    Fixes:
    - the second axis label was set with plt.xlabel twice; the xi_2
      label now goes to the y axis.
    - results are pickled through a binary file handle; pickle.dump
      fails on text-mode files in Python 3.

    @param gridTypeStr: sparse grid type as string
    @param level: initial regular level (incremented per iteration)
    @param maxGridSize: stop once the sampler would exceed this size
    @param boundaryLevel: boundary level (capped at level)
    @param out: if True, dump the statistics to a pkl file
    """
    np.random.seed(1234567)
    test_samples, test_values = self.getTestSamples()
    gridType = Grid.stringToGridType(gridTypeStr)
    stats = {}
    while True:
        print("-" * 80)
        print("level = %i, boundary level = %i" % (level, boundaryLevel))
        print("-" * 80)
        uqManager = TestEnvironmentSG().buildSetting(
            self.params,
            self.simulation,
            level,
            gridType,
            deg=20,
            maxGridSize=maxGridSize,
            boundaryLevel=min(level, boundaryLevel),
            knowledgeTypes=[KnowledgeTypes.SIMPLE])
        if uqManager.sampler.getSize() > maxGridSize:
            print("DONE: %i > %i" % (uqManager.sampler.getSize(),
                                     maxGridSize))
            break

        # ----------------------------------------------
        # first run
        while uqManager.hasMoreSamples():
            uqManager.runNextSamples()
        # ----------------------------------------------------------
        # optional: plot the model with the transformed grid points
        # (disabled)
        if False:
            grid, alpha = uqManager.knowledge.getSparseGridFunction()
            samples = DataMatrix(grid.getSize(), self.numDims)
            grid.getStorage().getCoordinateArrays(samples)
            samples = self.dist.ppf(samples.array())
            fig = plt.figure()
            plotFunction2d(self.simulation,
                           color_bar_label=r"$u(\xi_1, \xi_2)$")
            plt.scatter(
                samples[:, 0], samples[:, 1],
                color=load_color(3),
                label=r"SG (CC-bound., $\ell=%i, \ell^{\text{b}}=%i$)" %
                (level, boundaryLevel))
            plt.xlabel(r"$\xi_1$")
            # fix: second axis label belongs on the y axis
            plt.ylabel(r"$\xi_2$")
            lgd = insert_legend(fig, loc="bottom", ncol=1)
            savefig(fig,
                    "plots/genz_with_grid_l%i_b%i" % (level, boundaryLevel),
                    lgd, tikz=False)
        # ----------------------------------------------------------
        # specify ASGC estimator
        analysis = ASGCAnalysisBuilder().withUQManager(uqManager)\
                                        .withMonteCarloEstimationStrategy(n=1000, npaths=10)\
                                        .andGetResult()
        analysis.setVerbose(False)
        # ----------------------------------------------------------
        # expectation values and variances
        sg_mean, sg_var = analysis.mean(), analysis.var()
        # ----------------------------------------------------------
        # estimate the l2 error
        grid, alpha = uqManager.getKnowledge().getSparseGridFunction()
        test_values_pred = evalSGFunction(grid, alpha, test_samples)
        l2test, l1test, maxErrorTest = \
            self.getErrors(test_values, test_values_pred)
        print("-" * 60)
        print("test: |.|_2 = %g" % l2test)
        # ----------------------------------------------------------
        stats[level] = {'num_model_evaluations': grid.getSize(),
                        'l2test': l2test,
                        'l1test': l1test,
                        'maxErrorTest': maxErrorTest,
                        'mean_estimated': sg_mean["value"],
                        'var_estimated': sg_var["value"]}
        level += 1

    if out:
        # store results
        radix = "%s_sg_d%i_%s_Nmax%i_N%i_b%i" % (
            self.radix, self.numDims, grid.getTypeAsString(),
            maxGridSize, grid.getSize(), boundaryLevel)
        if self.rosenblatt:
            radix += "_rosenblatt"
        filename = os.path.join(self.pathResults, "%s.pkl" % radix)
        # pickle requires a binary file handle in Python 3
        with open(filename, "wb") as fd:
            pkl.dump({'surrogate': 'sg',
                      'num_dims': self.numDims,
                      'grid_type': grid.getTypeAsString(),
                      'max_grid_size': maxGridSize,
                      'is_full': False,
                      'refinement': False,
                      'rosenblatt': self.rosenblatt,
                      'boundaryLevel': boundaryLevel,
                      'results': stats}, fd)
def run_regular_sparse_grid_boundary(self, gridTypeStr, level, maxGridSize,
                                     boundaryLevel=1, out=False):
    """
    Study the influence of the boundary level: for a fixed regular level,
    increase boundaryLevel up to level and collect statistics per
    boundary level.

    Fix: results are now pickled through a binary file handle opened with
    a context manager; pickle.dump fails on text-mode files in Python 3.

    @param gridTypeStr: sparse grid type as string
    @param level: fixed regular level
    @param maxGridSize: upper bound on the grid size
    @param boundaryLevel: initial boundary level (incremented up to level)
    @param out: if True, dump the statistics to a pkl file
    """
    np.random.seed(1234567)
    test_samples, test_values = self.getTestSamples()
    gridType = Grid.stringToGridType(gridTypeStr)
    stats = {}
    while boundaryLevel <= level:
        print("-" * 80)
        print("level = %i, boundary level = %i" % (level, boundaryLevel))
        print("-" * 80)
        uqManager = TestEnvironmentSG().buildSetting(
            self.params,
            self.simulation,
            level,
            gridType,
            deg=20,
            maxGridSize=maxGridSize,
            boundaryLevel=boundaryLevel,
            knowledgeTypes=[KnowledgeTypes.SIMPLE])
        # ----------------------------------------------
        # first run
        while uqManager.hasMoreSamples():
            uqManager.runNextSamples()
        # ----------------------------------------------------------
        # specify ASGC estimator
        analysis = ASGCAnalysisBuilder().withUQManager(uqManager)\
                                        .withMonteCarloEstimationStrategy(n=1000, npaths=10)\
                                        .andGetResult()
        analysis.setVerbose(False)
        # ----------------------------------------------------------
        # expectation values and variances
        sg_mean, sg_var = analysis.mean(), analysis.var()
        # ----------------------------------------------------------
        # estimate the l2 error
        grid, alpha = uqManager.getKnowledge().getSparseGridFunction()
        test_values_pred = evalSGFunction(grid, alpha, test_samples)
        l2test, l1test, maxErrorTest = \
            self.getErrors(test_values, test_values_pred)
        print("-" * 60)
        print("test: |.|_2 = %g" % l2test)
        # ----------------------------------------------------------
        stats[boundaryLevel] = {'num_model_evaluations': grid.getSize(),
                                'l2test': l2test,
                                'l1test': l1test,
                                'maxErrorTest': maxErrorTest,
                                'mean_estimated': sg_mean["value"],
                                'var_estimated': sg_var["value"]}
        boundaryLevel += 1

    if out:
        # store results (note: boundaryLevel is level + 1 at this point)
        filename = os.path.join(
            self.pathResults,
            "%s_sg_d%i_%s_Nmax%i_N%i_b%i.pkl" %
            (self.radix, self.numDims, grid.getTypeAsString(),
             maxGridSize, grid.getSize(), boundaryLevel))
        # pickle requires a binary file handle in Python 3
        with open(filename, "wb") as fd:
            pkl.dump({'surrogate': 'sg',
                      'num_dims': self.numDims,
                      'grid_type': grid.getTypeAsString(),
                      'max_grid_size': maxGridSize,
                      'is_full': False,
                      'refinement': False,
                      'results': stats}, fd)