def testMarginalization_3D(self):
    """Marginalize 3d sparse grid interpolants and check that the total
    integral is preserved.

    Marginalizing w.r.t. the Lebesgue measure integrates out the given
    dimensions, so the quadrature value of the marginalized function must
    equal the quadrature value of the full interpolant (this is exactly
    the s1 == s3 invariant asserted in the second half of the test).
    """
    def f(x):
        # separable product of 1d parabolas 4 x (1 - x)
        return np.prod([4 * xi * (1 - xi) for xi in x])

    d = [0]

    # interpolate f on a regular sparse grid and marginalize dimension 0
    level = 5
    grid, alpha = interpolate(f, level, 3)
    n_grid, n_alpha, _ = doMarginalize(grid, alpha, linearForm=None, dd=d[:])

    # BUG FIX: the original assertion compared doQuadrature(n_grid, n_alpha)
    # with itself (a tautology); compare the marginalized integral with the
    # integral of the full interpolant instead, as done below for h
    self.assertTrue(
        abs(doQuadrature(n_grid, n_alpha) - doQuadrature(grid, alpha)) < 1e-10)

    # quantity of interest: product of g-function factors
    bs = [0.1, 0.2, 1.5]

    def g(x, a):
        return abs((4. * x - 2.) + a) / (a + 1.)

    def h(xs):
        return np.prod([g(x, b) for x, b in zip(xs, bs)])

    d = [0]

    # marginalize the interpolant of h twice over the same dimension and
    # make sure both results agree with the full quadrature value
    level = 5
    grid, alpha = interpolate(h, level, 3)
    n_grid, n_alpha, _ = doMarginalize(grid, alpha, linearForm=None, dd=d[:])
    s1 = doQuadrature(n_grid, n_alpha)
    n_grid, n_alpha, _ = doMarginalize(grid, alpha, linearForm=None, dd=[0])
    s2 = doQuadrature(n_grid, n_alpha)
    s3 = doQuadrature(grid, alpha)

    self.assertTrue(abs(s1 - s2) < 1e-10)
    self.assertTrue(abs(s1 - s3) < 1e-10)
    self.assertTrue(abs(s2 - s3) < 1e-10)
def testMarginalization_2D(self):
    """Check that marginalizing a 2d sparse grid interpolant over
    dimension 1 leaves the total quadrature value unchanged."""
    def parabola_product(x):
        # separable product of 1d parabolas 4 x (1 - x)
        return np.prod([4 * xi * (1 - xi) for xi in x])

    marg_dims = [1]

    # interpolate on a regular sparse grid, then marginalize
    sg_level = 5
    grid, alpha = interpolate(parabola_product, sg_level, 2)
    n_grid, n_alpha, err = doMarginalize(grid, alpha, linearForm=None,
                                         dd=marg_dims)

    marginal_integral = doQuadrature(n_grid, n_alpha)
    full_integral = doQuadrature(grid, alpha)
    self.assertTrue(abs(marginal_integral - full_integral) < 1e-14)
def testMarginalEstimationStrategy(self):
    """Compare sparse grid quadrature and the analytic estimation
    strategies against a closed-form reference value for a separable
    polynomial on [-1, 1]^2, then check the marginal mean."""
    bounds = np.array([[-1, 1], [-1, 1]])
    trans = JointTransformation()
    marginals = []
    for lo, hi in bounds:
        trans.add(LinearTransformation(lo, hi))
        marginals.append(Uniform(lo, hi))
    dist = J(marginals)

    def parabola(x):
        return np.prod([(1 + xi) * (1 - xi) for xi in x])

    def F(x):
        # helper used to build the closed-form reference value
        return 1. - x ** 3 / 3.

    grid, alpha_vec = interpolate(parabola, 1, 2, gridType=GridType_Poly,
                                  deg=2, trans=trans)
    alpha = alpha_vec.array()

    reference = (F(1) - F(-1)) ** 2
    quad_value = doQuadrature(grid, alpha)
    mean_value = AnalyticEstimationStrategy().mean(grid, alpha, dist,
                                                   trans)["value"]
    self.assertTrue(abs(reference - quad_value) < 1e-10)
    self.assertTrue(abs(reference - mean_value) < 1e-10)

    # marginal mean in dimension 0 must be 2/3
    ngrid, nalpha, _ = MarginalAnalyticEstimationStrategy().mean(
        grid, alpha, dist, trans, [[0]])
    self.assertTrue(abs(nalpha[0] - 2. / 3.) < 1e-10)

    plotSG3d(grid, alpha)
    plt.figure()
    plotSG1d(ngrid, nalpha)
    plt.show()
def test2DCovarianceMatrix(self):
    """Fit SGDE and KDE densities to samples of a correlated 2d normal
    distribution and validate the KDE covariance/correlation estimates."""
    # prepare data: correlated bivariate normal truncated to [0, 1]^2
    np.random.seed(1234567)
    C = np.array([[0.3, 0.09], [0.09, 0.3]]) / 10.
    U = dists.MultivariateNormal([0.5, 0.5], C, 0, 1)
    samples = U.rvs(2000)
    kde = KDEDist(samples)
    sgde = SGDEdist.byLearnerSGDEConfig(
        samples,
        bounds=U.getBounds(),
        config={
            "grid_level": 5,
            "grid_type": "linear",
            "grid_maxDegree": 1,
            "refinement_numSteps": 0,
            "refinement_numPoints": 10,
            "solver_threshold": 1e-10,
            "solver_verbose": False,
            "regularization_type": "Laplace",
            "crossValidation_lambda": 3.16228e-06,
            "crossValidation_enable": False,
            "crossValidation_kfold": 5,
            "crossValidation_silent": False,
            "sgde_makePositive": True,
            "sgde_makePositive_candidateSearchAlgorithm": "joined",
            "sgde_makePositive_interpolationAlgorithm": "setToZero",
            "sgde_generateConsistentGrid": True,
            "sgde_unitIntegrand": True})

    # plot the 1d marginals of the SGDE density
    sgde_x1 = sgde.marginalizeToDimX(0)
    sgde_x2 = sgde.marginalizeToDimX(1)
    plt.figure()
    plotDensity1d(sgde_x1, label="x1")
    plotDensity1d(sgde_x2, label="x2")
    plt.title("mean: x1=%g, x2=%g; var: x1=%g, x2=%g" %
              (sgde_x1.mean(), sgde_x2.mean(), sgde_x1.var(), sgde_x2.var()))
    plt.legend()

    # round-trip the SGDE density through its json representation
    sgde = Dist.fromJson(json.loads(sgde.toJson()))

    for density, title in ((U, "analytic"), (kde, "kde")):
        plt.figure()
        plotDensity2d(density, addContour=True)
        plt.title(title)
    plt.figure()
    plotDensity2d(sgde, addContour=True)
    plt.title("sgde (I(f) = %g)" % (doQuadrature(sgde.grid, sgde.alpha), ))

    # print the results
    print("E(x) ~ %g ~ %g" % (kde.mean(), sgde.mean()))
    print("V(x) ~ %g ~ %g" % (kde.var(), sgde.var()))
    print("-" * 60)
    print(kde.cov())
    print(sgde.cov())

    self.assertTrue(np.linalg.norm(C - kde.cov()) < 1e-2, "KDE cov wrong")
    self.assertTrue(
        np.linalg.norm(np.corrcoef(samples.T) - kde.corrcoeff()) < 1e-1,
        "KDE corrcoef wrong")

    plt.show()
def test2DNormalMoments(self):
    """Estimate mean/variance of a truncated 2d normal product density
    via SGDE and compare against the analytic values; KDE serves as a
    printed reference."""
    mu = 0
    sigma2 = 0.5
    U = dists.J([dists.Normal(mu, sigma2, -2, 2),
                 dists.Normal(mu, sigma2, -2, 2)])

    np.random.seed(1234567)
    trainSamples = U.rvs(1000)
    dist = SGDEdist.byLearnerSGDEConfig(
        trainSamples,
        config={"grid_level": 5,
                "grid_type": "linear",
                "refinement_numSteps": 0,
                "refinement_numPoints": 10,
                "regularization_type": "Laplace",
                "crossValidation_lambda": 0.000562341,
                "crossValidation_enable": False,
                "crossValidation_kfold": 5,
                "crossValidation_silent": True,
                "sgde_makePositive": True},
        bounds=U.getBounds())
    samples_dist = dist.rvs(1000, shuffle=True)
    kde = KDEDist(trainSamples)
    samples_kde = kde.rvs(1000, shuffle=True)

    # -----------------------------------------------
    self.assertTrue(np.abs(U.mean() - dist.mean()) < 1e-2,
                    "SGDE mean wrong")
    self.assertTrue(np.abs(U.var() - dist.var()) < 4e-2,
                    "SGDE variance wrong")
    # -----------------------------------------------

    # print the results
    print("E(x) ~ %g ~ %g" % (kde.mean(), dist.mean()))
    print("V(x) ~ %g ~ %g" % (kde.var(), dist.var()))
    print("log ~ %g ~ %g" % (kde.crossEntropy(trainSamples),
                             dist.crossEntropy(trainSamples)))
    print("-" * 60)
    print(dist.cov())
    print(kde.cov())

    # 1d marginals in dimension 0
    sgde_x1 = dist.marginalizeToDimX(0)
    kde_x1 = kde.marginalizeToDimX(0)

    plt.figure()
    plotDensity1d(U.getDistributions()[0], label="analytic")
    plotDensity1d(sgde_x1, label="sgde")
    plotDensity1d(kde_x1, label="kde")
    plt.title("mean: sgde=%g, kde=%g; var: sgde=%g, kde=%g" %
              (sgde_x1.mean(), kde_x1.mean(), sgde_x1.var(), kde_x1.var()))
    plt.legend()

    plt.figure()
    plotDensity2d(U, addContour=True)
    plt.title("analytic")

    plt.figure()
    plotDensity2d(kde, addContour=True)
    plt.scatter(samples_kde[:, 0], samples_kde[:, 1])
    plt.title("kde")

    plt.figure()
    plotDensity2d(dist, addContour=True)
    plt.scatter(samples_dist[:, 0], samples_dist[:, 1])
    plt.title("sgde (I(f) = %g)" %
              (np.prod(U.getBounds()) * doQuadrature(dist.grid, dist.alpha), ))

    plt.show()
def computeBilinearForm(grid, U):
    r"""
    Compute bilinear form
    (A)_ij = \int phi_i phi_j dU(x)
    on measure U, which is in this case supposed to be a lebesgue measure.
    @param grid: Grid, sparse grid
    @param U: list of distributions, Lebeasgue measure
    @return: DataMatrix
    """
    gs = grid.getStorage()
    basis = getBasis(grid)
    # interpolate phi_i phi_j on sparse grid with piecewise polynomial SG
    # the product of two piecewise linear functions is a piecewise
    # polynomial one of degree 2.
    ngrid = Grid.createPolyBoundaryGrid(1, 2)
    # ngrid = Grid.createLinearBoundaryGrid(1)
    ngrid.getGenerator().regular(gs.getMaxLevel() + 1)
    ngs = ngrid.getStorage()
    nodalValues = DataVector(ngs.size())

    # level/index arrays as used for evaluation; entries are floats, so
    # the support-bound divisions below are true divisions
    level = DataMatrix(gs.size(), gs.getDimension())
    index = DataMatrix(gs.size(), gs.getDimension())
    gs.getLevelIndexArraysForEval(level, index)

    A = DataMatrix(gs.size(), gs.size())
    # per-dimension 1d integrals; the entry is their product (tensor basis)
    s = np.ndarray(gs.getDimension(), dtype='float')

    # run over all rows
    for i in range(gs.size()):
        gpi = gs.getPoint(i)
        # run over all columns (A is symmetric, so j >= i suffices)
        for j in range(i, gs.size()):
            # print "%i/%i" % (i * gs.size() + j + 1, gs.size() ** 2)
            gpj = gs.getPoint(j)
            # run over all dimensions
            for d in range(gs.getDimension()):
                # get level index
                lid, iid = level.get(i, d), index.get(i, d)
                ljd, ijd = level.get(j, d), index.get(j, d)

                # compute left and right boundary of the support of both
                # basis functions
                lb = max([((iid - 1) / lid), ((ijd - 1) / ljd)])
                ub = min([((iid + 1) / lid), ((ijd + 1) / ljd)])

                # same level, different index
                # NOTE(review): this assumes basis functions of the same
                # level with different indices have disjoint supports —
                # holds for the piecewise linear hat basis
                if lid == ljd and iid != ijd:
                    s[d] = 0.
                # the support does not overlap
                elif lid != ljd and lb >= ub:
                    s[d] = 0.
                else:
                    # ----------------------------------------------------
                    # do the 1d interpolation ...
                    lid, iid = gpi.getLevel(d), int(iid)
                    ljd, ijd = gpj.getLevel(d), int(ijd)
                    # sample the product phi_i * phi_j at the 1d grid
                    # nodes; clamp to >= 0 before multiplying
                    for k in range(ngs.size()):
                        x = ngs.getCoordinate(ngs.getPoint(k), 0)
                        nodalValues[k] = max(0, basis.eval(lid, iid, x)) * \
                            max(0, basis.eval(ljd, ijd, x))
                    # ... by hierarchization
                    v = hierarchize(ngrid, nodalValues)

                    # weight the interpolant with the measure U in dim d
                    def f(x, y):
                        return float(y * U[d].pdf(x[0]))

                    g, w, _ = discretize(ngrid, v, f, refnums=0)
                    # compute the integral of it
                    s[d] = doQuadrature(g, w)
                    # ----------------------------------------------------
            # store result in matrix (and mirror it: A is symmetric)
            A.set(i, j, float(np.prod(s)))
            A.set(j, i, A.get(i, j))

    return A
def computeBilinearFormEntry(self, gs, gpi, basisi, gpj, basisj, d):
    # Compute one entry \int phi_i(x) phi_j(x) dU(x) of the bilinear
    # form as the product of per-dimension 1d integrals (tensor basis).
    # Returns (value, accumulated 1d discretization error).
    # NOTE(review): the parameter d is immediately shadowed by the loop
    # variable below and is therefore unused — confirm against callers.
    # if not, compute it
    ans = 1
    err = 0.

    # interpolating 1d sparse grid
    ngrid = Grid.createPolyBoundaryGrid(1, 2)
    ngrid.getGenerator().regular(2)
    ngs = ngrid.getStorage()
    nodalValues = DataVector(ngs.getSize())

    for d in range(gpi.getDimension()):
        # get level index
        lid, iid = gpi.getLevel(d), gpi.getIndex(d)
        ljd, ijd = gpj.getLevel(d), gpj.getIndex(d)

        # compute left and right boundary of the support of both
        # basis functions
        xlowi, xhighi = getBoundsOfSupport(gs, lid, iid)
        xlowj, xhighj = getBoundsOfSupport(gs, ljd, ijd)

        # common support of the two basis functions
        xlow = max(xlowi, xlowj)
        xhigh = min(xhighi, xhighj)

        # same level, different index: supports are disjoint, so the
        # whole tensor-product entry vanishes
        if lid == ljd and iid != ijd and lid > 0:
            return 0., 0.
        # the support does not overlap
        elif lid != ljd and xlow >= xhigh:
            return 0., 0.
        else:
            # ----------------------------------------------------
            # do the 1d interpolation ...
            # define transformation function mapping the unit interval
            # onto the common support [xlow, xhigh]
            T = LinearTransformation(xlow, xhigh)
            for k in range(ngs.getSize()):
                x = ngs.getCoordinate(ngs.getPoint(k), 0)
                x = T.unitToProbabilistic(x)
                nodalValues[k] = basisi.eval(lid, iid, x) * \
                    basisj.eval(ljd, ijd, x)
            # ... by hierarchization
            v = hierarchize(ngrid, nodalValues)

            # discretize the following function
            def f(x, y):
                xp = T.unitToProbabilistic(x)
                return float(y * self._U[d].pdf(xp))

            # sparse grid quadrature
            g, w, err1d = discretize(ngrid, v, f, refnums=0, level=5,
                                     useDiscreteL2Error=False)
            # rescale: quadrature is done on the unit interval, the
            # integral lives on [xlow, xhigh]
            s = T.vol() * doQuadrature(g, w)

            # fig = plt.figure()
            # plotSG1d(ngrid, v)
            # x = np.linspace(xlow, ub, 100)
            # plt.plot(np.linspace(0, 1, 100), U[d].pdf(x))
            # fig.show()
            # fig = plt.figure()
            # plotSG1d(g, w)
            # x = np.linspace(0, 1, 100)
            # plt.plot(x,
            #          [evalSGFunction(ngrid, v, DataVector([xi])) * U[d].pdf(T.unitToProbabilistic(xi)) for xi in x])
            # fig.show()
            # plt.show()

            # compute the integral of it
            # ----------------------------------------------------
            # accumulate: entry is the product over dimensions, the error
            # is summed over the 1d discretizations
            ans *= s
            err += err1d[1]

    return ans, err
def computeBilinearForm(grid, U):
    r"""
    Compute bilinear form
    (A)_ij = \int phi_i phi_j dU(x)
    on measure U, which is in this case supposed to be a lebesgue measure.

    Legacy variant written against the old pysgpp bindings (gs.dim(),
    gs.get(i), createGridGenerator()). The only behavioral change made
    here is xrange -> range, which is required for Python 3 (xrange was
    removed) and equivalent in this read-only iteration.

    @param grid: Grid, sparse grid
    @param U: list of distributions, Lebeasgue measure
    @return: DataMatrix
    """
    gs = grid.getStorage()
    basis = getBasis(grid)

    # interpolate phi_i phi_j on sparse grid with piecewise polynomial SG
    # the product of two piecewise linear functions is a piecewise
    # polynomial one of degree 2.
    ngrid = Grid.createPolyBoundaryGrid(1, 2)
    # ngrid = Grid.createLinearBoundaryGrid(1)
    ngrid.createGridGenerator().regular(gs.getMaxLevel() + 1)
    ngs = ngrid.getStorage()
    nodalValues = DataVector(ngs.size())

    # level/index arrays as used for evaluation; entries are floats, so
    # the support-bound divisions below are true divisions
    level = DataMatrix(gs.size(), gs.dim())
    index = DataMatrix(gs.size(), gs.dim())
    gs.getLevelIndexArraysForEval(level, index)

    A = DataMatrix(gs.size(), gs.size())
    # per-dimension 1d integrals; the entry is their product (tensor basis)
    s = np.ndarray(gs.dim(), dtype='float')

    # run over all rows
    for i in range(gs.size()):
        gpi = gs.get(i)
        # run over all columns (A is symmetric, so j >= i suffices)
        for j in range(i, gs.size()):
            gpj = gs.get(j)
            # run over all dimensions
            for d in range(gs.dim()):
                # get level index
                lid, iid = level.get(i, d), index.get(i, d)
                ljd, ijd = level.get(j, d), index.get(j, d)

                # compute left and right boundary of the support of both
                # basis functions
                lb = max([(iid - 1) / lid, (ijd - 1) / ljd])
                ub = min([(iid + 1) / lid, (ijd + 1) / ljd])

                # same level, different index -> disjoint supports
                if lid == ljd and iid != ijd:
                    s[d] = 0.
                # the support does not overlap
                elif lid != ljd and lb >= ub:
                    s[d] = 0.
                else:
                    # ----------------------------------------------------
                    # do the 1d interpolation of phi_i * phi_j ...
                    lid, iid = gpi.getLevel(d), int(iid)
                    ljd, ijd = gpj.getLevel(d), int(ijd)
                    for k in range(ngs.size()):
                        x = ngs.get(k).getCoord(0)
                        nodalValues[k] = max(0, basis.eval(lid, iid, x)) * \
                            max(0, basis.eval(ljd, ijd, x))
                    # ... by hierarchization
                    v = hierarchize(ngrid, nodalValues)

                    # weight the interpolant with the measure U in dim d
                    def f(x, y):
                        return float(y * U[d].pdf(x[0]))

                    g, w, _ = discretize(ngrid, v, f, refnums=0)
                    # compute the integral of it
                    s[d] = doQuadrature(g, w)
                    # ----------------------------------------------------
            # store result in matrix (and mirror it: A is symmetric)
            A.set(i, j, float(np.prod(s)))
            A.set(j, i, A.get(i, j))

    return A
def computeBilinearFormEntry(self, gpi, basisi, gpj, basisj):
    r"""
    Compute one entry \int phi_i(x) phi_j(x) dU(x) of the bilinear form
    as a product of per-dimension 1d integrals (tensor-product basis).

    Legacy variant written against the old pysgpp bindings; the only
    behavioral change made here is xrange -> range, which is required
    for Python 3 (xrange was removed) and equivalent in this iteration.

    NOTE(review): self._map is only read here, never written — the cache
    is presumably populated by the caller; verify before adding a store.

    @param gpi: grid point i
    @param basisi: 1d basis used for grid point i
    @param gpj: grid point j
    @param basisj: 1d basis used for grid point j
    @return: tuple (value, accumulated 1d discretization error)
    """
    # check if this entry already exists (entry is symmetric in i, j)
    for key in [self.getKey(gpi, gpj), self.getKey(gpj, gpi)]:
        if key in self._map:
            return self._map[key]

    # if not, compute it
    ans = 1
    err = 0.

    # interpolating 1d sparse grid
    ngrid = Grid.createPolyBoundaryGrid(1, 2)
    ngrid.createGridGenerator().regular(2)
    ngs = ngrid.getStorage()
    nodalValues = DataVector(ngs.size())

    for d in range(gpi.dim()):
        # get level index
        lid, iid = gpi.getLevel(d), gpi.getIndex(d)
        ljd, ijd = gpj.getLevel(d), gpj.getIndex(d)

        # compute left and right boundary of the support of both
        # basis functions
        xlowi, xhighi = self.getBounds(lid, iid)
        xlowj, xhighj = self.getBounds(ljd, ijd)

        # common support of the two basis functions
        xlow = max(xlowi, xlowj)
        xhigh = min(xhighi, xhighj)

        # same level, different index: supports are disjoint, so the
        # whole tensor-product entry vanishes
        if lid == ljd and iid != ijd and lid > 0:
            return 0., 0.
        # the support does not overlap
        elif lid != ljd and xlow >= xhigh:
            return 0., 0.
        else:
            # ----------------------------------------------------
            # do the 1d interpolation of phi_i * phi_j on the common
            # support; T maps the unit interval onto [xlow, xhigh]
            T = LinearTransformation(xlow, xhigh)
            for k in range(ngs.size()):
                x = ngs.get(k).getCoord(0)
                x = T.unitToProbabilistic(x)
                nodalValues[k] = basisi.eval(lid, iid, x) * \
                    basisj.eval(ljd, ijd, x)
            # ... by hierarchization
            v = hierarchize(ngrid, nodalValues)

            # discretize the interpolant weighted by the measure U
            def f(x, y):
                xp = T.unitToProbabilistic(x)
                return float(y * self._U[d].pdf(xp))

            # sparse grid quadrature; rescale from the unit interval to
            # the common support via T.vol()
            g, w, err1d = discretize(ngrid, v, f, refnums=0, level=5,
                                     useDiscreteL2Error=False)
            s = T.vol() * doQuadrature(g, w)
            # ----------------------------------------------------
            # accumulate: value is the product over dimensions, the
            # error is summed over the 1d discretizations
            ans *= s
            err += err1d[1]

    return ans, err