def getIntegral(grid, level, index):
    """Return the integral over [0, 1] of the 1d hierarchical basis
    function identified by (level, index) for the given grid type.

    @param grid: Grid, determines which basis is used
    @param level: int, hierarchical level
    @param index: int, hierarchical index
    @return: float, integral of phi_{level, index}
    @raise AttributeError: for grid types without a known integral rule
    """
    gridType = grid.getType()

    if gridType in [LinearBoundary, LinearL0Boundary]:
        # boundary grids: level-0 functions integrate like level-1 hats
        return np.power(2., -max(1, level))
    elif gridType == Linear:
        # plain hat function integral; a 4/3 leaf correction was once
        # applied here but is intentionally disabled
        return np.power(2., -level)
    elif gridType == Poly:
        return getBasis(grid).getIntegral(level, index)
    elif gridType == PolyBoundary:
        return getBasis(grid).getIntegral(level, index)
    else:
        raise AttributeError('unsupported grid type %s' % gridType)
def getIntegral(grid, level, index):
    """Return the integral over [0, 1] of the 1d hierarchical basis
    function identified by (level, index) for the given grid type.

    @param grid: Grid, determines which basis is used
    @param level: int, hierarchical level
    @param index: int, hierarchical index
    @return: float, integral of phi_{level, index}
    @raise AttributeError: for grid types without a known integral rule
    """
    gridType = grid.getType()

    # boundary grids with linear hats: level-0 contributions integrate
    # like level-1 hats, hence the max(1, level)
    hatBoundaryTypes = [GridType_LinearBoundary,
                        GridType_LinearTruncatedBoundary,
                        GridType_LinearL0Boundary]

    # all types whose basis object knows its own 1d integral
    basisIntegralTypes = [GridType_ModLinear,
                          GridType_LinearClenshawCurtis,
                          GridType_LinearClenshawCurtisBoundary,
                          GridType_ModLinearClenshawCurtis,
                          GridType_Poly,
                          GridType_PolyBoundary,
                          GridType_ModPoly,
                          GridType_PolyClenshawCurtis,
                          GridType_PolyClenshawCurtisBoundary,
                          GridType_ModPolyClenshawCurtis,
                          GridType_Bspline,
                          GridType_BsplineBoundary,
                          GridType_ModBspline,
                          GridType_BsplineClenshawCurtis,
                          GridType_ModBsplineClenshawCurtis]

    if gridType in hatBoundaryTypes:
        return np.power(2., -max(1, level))
    elif gridType == GridType_Linear:
        # plain hat function integral; a 4/3 leaf correction was once
        # applied here but is intentionally disabled
        return np.power(2., -level)
    elif gridType in basisIntegralTypes:
        return getBasis(grid).getIntegral(level, index)
    else:
        raise AttributeError('unsupported grid type %s' % gridType)
def computeBilinearFormQuad(grid, U):
    r"""Assemble the full bilinear form A_ij = \int phi_i phi_j dU(x)
    with scipy's adaptive 1d quadrature, dimension by dimension.

    @param grid: Grid, sparse grid
    @param U: list of distributions (one per dimension)
    @return: DataMatrix, symmetric gs.size() x gs.size() matrix
    """
    gs = grid.getStorage()
    basis = getBasis(grid)
    A = DataMatrix(gs.size(), gs.size())

    # level holds 2^l, index holds i for every grid point and dimension
    level = DataMatrix(gs.size(), gs.dim())
    index = DataMatrix(gs.size(), gs.dim())
    gs.getLevelIndexArraysForEval(level, index)

    s = np.ndarray(gs.dim(), dtype='float')
    for i in xrange(gs.size()):
        gpi = gs.get(i)
        # the matrix is symmetric: compute the upper triangle only
        for j in xrange(i, gs.size()):
            gpj = gs.get(j)
            for d in xrange(gs.dim()):
                lid, iid = level.get(i, d), index.get(i, d)
                ljd, ijd = level.get(j, d), index.get(j, d)

                # intersection of the two 1d supports ((i +- 1) * 2^-l)
                lb = max([(iid - 1) / lid, (ijd - 1) / ljd])
                ub = min([(iid + 1) / lid, (ijd + 1) / ljd])

                if lid == ljd and iid != ijd:
                    # same level, different index: disjoint supports
                    s[d] = 0.
                elif lid != ljd and lb >= ub:
                    # different levels but no overlap
                    s[d] = 0.
                else:
                    # switch to true (level, index) pairs for evaluation
                    lid, iid = gpi.getLevel(d), int(iid)
                    ljd, ijd = gpj.getLevel(d), int(ijd)

                    # weighted product of both basis functions
                    def f(x, lid=lid, iid=iid, ljd=ljd, ijd=ijd, d=d):
                        return basis.eval(lid, iid, x) * \
                            basis.eval(ljd, ijd, x) * \
                            U[d].pdf(x)

                    s[d], _ = quad(f, lb, ub, epsabs=1e-8)

            A.set(i, j, float(np.prod(s)))
            A.set(j, i, A.get(i, j))
    return A
def computeBFQuad(grid, U, admissibleSet, n=100): """ @param grid: Grid @param U: list of distributions @param admissibleSet: AdmissibleSet @param n: int, number of MC samples """ gs = grid.getStorage() basis = getBasis(grid) A = DataMatrix(admissibleSet.getSize(), gs.size()) b = DataVector(admissibleSet.getSize()) s = np.ndarray(gs.dim(), dtype='float') # run over all rows for i, gpi in enumerate(admissibleSet.values()): # run over all columns for j in xrange(gs.size()): # print "%i/%i" % (i * gs.size() + j + 1, gs.size() ** 2) gpj = gs.get(j) for d in xrange(gs.dim()): # get level index lid, iid = gpi.getLevel(d), gpi.getIndex(d) ljd, ijd = gpj.getLevel(d), gpj.getIndex(d) # compute left and right boundary of the support of both # basis functions xlow = max([(iid - 1) * 2 ** -lid, (ijd - 1) * 2 ** -ljd]) xhigh = min([(iid + 1) * 2 ** -lid, (ijd + 1) * 2 ** -ljd]) # same level, different index if lid == ljd and iid != ijd: s[d] = 0. # the support does not overlap elif lid != ljd and xlow >= xhigh: s[d] = 0. else: # ---------------------------------------------------- # use scipy for integration def f(x): return basis.eval(lid, iid, x) * \ basis.eval(ljd, ijd, x) * \ U[d].pdf(x) s[d], _ = quad(f, xlow, xhigh, epsabs=1e-8) # ---------------------------------------------------- A.set(i, j, float(np.prod(s))) if gs.seq(gpi) == j: b[i] = A.get(i, j) return A, b
def computeBFQuad(grid, U, admissibleSet, n=100): """ @param grid: Grid @param U: list of distributions @param admissibleSet: AdmissibleSet @param n: int, number of MC samples """ gs = grid.getStorage() basis = getBasis(grid) A = DataMatrix(admissibleSet.getSize(), gs.size()) b = DataVector(admissibleSet.getSize()) s = np.ndarray(gs.getDimension(), dtype='float') # run over all rows for i, gpi in enumerate(admissibleSet.values()): # run over all columns for j in range(gs.size()): # print "%i/%i" % (i * gs.size() + j + 1, gs.size() ** 2) gpj = gs.getPoint(j) for d in range(gs.getDimension()): # get level index lid, iid = gpi.getLevel(d), gpi.getIndex(d) ljd, ijd = gpj.getLevel(d), gpj.getIndex(d) # compute left and right boundary of the support of both # basis functions xlow = max([(iid - 1) * 2 ** -lid, (ijd - 1) * 2 ** -ljd]) xhigh = min([(iid + 1) * 2 ** -lid, (ijd + 1) * 2 ** -ljd]) # same level, different index if lid == ljd and iid != ijd: s[d] = 0. # the support does not overlap elif lid != ljd and xlow >= xhigh: s[d] = 0. else: # ---------------------------------------------------- # use scipy for integration def f(x): return basis.eval(lid, iid, x) * \ basis.eval(ljd, ijd, x) * \ U[d].pdf(x) s[d], _ = quad(f, xlow, xhigh, epsabs=1e-8) # ---------------------------------------------------- A.set(i, j, float(np.prod(s))) if gs.getSequenceNumber(gpi) == j: b[i] = A.get(i, j) return A, b
def computeLinearForm(self, grid):
    """Compute the linear form entry for every point of the current grid.

    @param grid: Grid
    @return: tuple(numpy array of entries, float accumulated error)
    """
    gs = grid.getStorage()
    basis = getBasis(grid)
    v = np.ndarray(gs.size())
    err = 0.
    # one linear-form entry per grid point; errors are accumulated
    for i in range(gs.size()):
        point = gs.getPoint(i)
        v[i], pointErr = self.getLinearFormEntry(gs, point, basis)
        err += pointErr
    return v, err
def computeLinearForm(self, grid):
    """Compute the linear form entry for every point of the current grid.

    @param grid: Grid
    @return: tuple(DataVector of entries, float accumulated error)
    """
    gs = grid.getStorage()
    basis = getBasis(grid)
    v = DataVector(gs.size())
    err = 0.
    # one linear-form entry per grid point; errors are accumulated
    for i in xrange(gs.size()):
        point = gs.get(i)
        v[i], pointErr = self.getLinearFormEntry(point, basis)
        err += pointErr
    return v, err
def computeExpectationValueEstimation(grid, U, admissibleSet):
    r"""Compute (b)_i = \int phi_i dU(x) on measure U, which is in this
    case supposed to be a lebesgue measure.

    The d-dimensional integral factorizes, so each 1d factor is computed
    with scipy's adaptive quadrature over the support of the basis
    function and the factors are multiplied.

    @param grid: Grid, sparse grid
    @param U: list of distributions, Lebeasgue measure
    @param admissibleSet: AdmissibleSet
    @return: DataVector
    """
    gs = grid.getStorage()
    basis = getBasis(grid)

    b = DataVector(admissibleSet.getSize())
    s = np.ndarray(gs.dim(), dtype='float')
    # NOTE: the original code also filled a DataVector with the point
    # coordinates via gpi.getCoords(p) but never used it; removed.
    for i, gpi in enumerate(admissibleSet.values()):
        for d in xrange(gs.dim()):
            # level/index pair in dimension d
            lid, iid = gpi.getLevel(d), gpi.getIndex(d)

            # support of the basis function: [(i-1) 2^-l, (i+1) 2^-l]
            xlow = (iid - 1) * 2 ** -lid
            xhigh = (iid + 1) * 2 ** -lid

            # basis function weighted with the marginal density
            def f(x, lid=lid, iid=iid, d=d):
                return basis.eval(lid, iid, x) * U[d].pdf(x)

            s[d], _ = quad(f, xlow, xhigh, epsabs=1e-8)

        b[i] = float(np.prod(s))
    return b
def computeExpectationValueEstimation(grid, U, admissibleSet):
    r"""Compute (b)_i = \int phi_i dU(x) on measure U, which is in this
    case supposed to be a lebesgue measure.

    The d-dimensional integral factorizes, so each 1d factor is computed
    with scipy's adaptive quadrature over the support of the basis
    function and the factors are multiplied.

    @param grid: Grid, sparse grid
    @param U: list of distributions, Lebeasgue measure
    @param admissibleSet: AdmissibleSet
    @return: DataVector
    """
    gs = grid.getStorage()
    basis = getBasis(grid)

    b = DataVector(admissibleSet.getSize())
    s = np.ndarray(gs.dim(), dtype='float')
    # NOTE: the original code also filled a DataVector with the point
    # coordinates via gpi.getCoords(p) but never used it; removed.
    for i, gpi in enumerate(admissibleSet.values()):
        for d in xrange(gs.dim()):
            # level/index pair in dimension d
            lid, iid = gpi.getLevel(d), gpi.getIndex(d)

            # support of the basis function: [(i-1) 2^-l, (i+1) 2^-l]
            xlow = (iid - 1) * 2 ** -lid
            xhigh = (iid + 1) * 2 ** -lid

            # basis function weighted with the marginal density
            def f(x, lid=lid, iid=iid, d=d):
                return basis.eval(lid, iid, x) * U[d].pdf(x)

            s[d], _ = quad(f, xlow, xhigh, epsabs=1e-8)

        b[i] = float(np.prod(s))
    return b
def computeBilinearForm(grid, U):
    r"""Compute bilinear form (A)_ij = \int phi_i phi_j dU(x) on measure
    U, which is in this case supposed to be a lebesgue measure.

    Each 1d factor phi_i * phi_j (a piecewise polynomial of degree 2) is
    interpolated exactly on an auxiliary 1d poly-boundary grid, weighted
    with the density and integrated by sparse grid quadrature.

    @param grid: Grid, sparse grid
    @param U: list of distributions, Lebeasgue measure
    @return: DataMatrix
    """
    gs = grid.getStorage()
    basis = getBasis(grid)

    # auxiliary 1d grid on which the degree-2 products are interpolated
    ngrid = Grid.createPolyBoundaryGrid(1, 2)
    ngrid.getGenerator().regular(gs.getMaxLevel() + 1)
    ngs = ngrid.getStorage()
    nodalValues = DataVector(ngs.size())

    # level holds 2^l, index holds i for every grid point and dimension
    level = DataMatrix(gs.size(), gs.getDimension())
    index = DataMatrix(gs.size(), gs.getDimension())
    gs.getLevelIndexArraysForEval(level, index)

    A = DataMatrix(gs.size(), gs.size())
    s = np.ndarray(gs.getDimension(), dtype='float')

    for i in range(gs.size()):
        gpi = gs.getPoint(i)
        # symmetric matrix: compute the upper triangle only
        for j in range(i, gs.size()):
            gpj = gs.getPoint(j)
            for d in range(gs.getDimension()):
                lid, iid = level.get(i, d), index.get(i, d)
                ljd, ijd = level.get(j, d), index.get(j, d)

                # intersection of the two 1d supports
                lb = max([((iid - 1) / lid), ((ijd - 1) / ljd)])
                ub = min([((iid + 1) / lid), ((ijd + 1) / ljd)])

                if lid == ljd and iid != ijd:
                    # same level, different index: disjoint supports
                    s[d] = 0.
                elif lid != ljd and lb >= ub:
                    # different levels but no overlap
                    s[d] = 0.
                else:
                    # interpolate the clipped 1d product on the
                    # auxiliary grid ...
                    lid, iid = gpi.getLevel(d), int(iid)
                    ljd, ijd = gpj.getLevel(d), int(ijd)
                    for k in range(ngs.size()):
                        x = ngs.getCoordinate(ngs.getPoint(k), 0)
                        nodalValues[k] = max(0, basis.eval(lid, iid, x)) * \
                            max(0, basis.eval(ljd, ijd, x))
                    # ... by hierarchization
                    v = hierarchize(ngrid, nodalValues)

                    # weight the interpolant with the marginal density
                    def f(x, y, d=d):
                        return float(y * U[d].pdf(x[0]))

                    g, w, _ = discretize(ngrid, v, f, refnums=0)
                    # integrate the weighted interpolant
                    s[d] = doQuadrature(g, w)

            A.set(i, j, float(np.prod(s)))
            A.set(j, i, A.get(i, j))
    return A
def computeBF(grid, U, admissibleSet):
    r"""Compute bilinear form (A)_ij = \int phi_i phi_j dU(x) on measure
    U, which is in this case supposed to be a lebesgue measure, for the
    admissible-set points against all grid points.

    Each 1d factor phi_i * phi_j (a piecewise polynomial of degree 2) is
    interpolated on an auxiliary 1d poly-boundary grid over the overlap
    of the supports, weighted with the density and integrated by sparse
    grid quadrature.

    @param grid: Grid, sparse grid
    @param U: list of distributions, Lebeasgue measure
    @param admissibleSet: AdmissibleSet
    @return: tuple(DataMatrix A, DataVector b) where b_i is the entry
             A_i,seq(gpi) for admissible points already in the grid
    """
    gs = grid.getStorage()
    basis = getBasis(grid)

    # auxiliary 1d grid on which the degree-2 products are interpolated
    ngrid = Grid.createPolyBoundaryGrid(1, 2)
    ngrid.createGridGenerator().regular(2)
    ngs = ngrid.getStorage()
    nodalValues = DataVector(ngs.size())

    A = DataMatrix(admissibleSet.getSize(), gs.size())
    b = DataVector(admissibleSet.getSize())
    s = np.ndarray(gs.dim(), dtype='float')

    for i, gpi in enumerate(admissibleSet.values()):
        for j in xrange(gs.size()):
            gpj = gs.get(j)
            for d in xrange(gs.dim()):
                lid, iid = gpi.getLevel(d), gpi.getIndex(d)
                ljd, ijd = gpj.getLevel(d), gpj.getIndex(d)

                # intersection of the supports of both basis functions
                lb = max([(iid - 1) * 2 ** -lid, (ijd - 1) * 2 ** -ljd])
                ub = min([(iid + 1) * 2 ** -lid, (ijd + 1) * 2 ** -ljd])

                if lid == ljd and iid != ijd:
                    # same level, different index: disjoint supports
                    s[d] = 0.
                elif lid != ljd and lb >= ub:
                    # different levels but no overlap
                    s[d] = 0.
                else:
                    # map the unit interval onto the overlap [lb, ub]
                    T = LinearTransformation(lb, ub)

                    # interpolate the 1d product on the auxiliary grid
                    for k in xrange(ngs.size()):
                        x = T.unitToProbabilistic(ngs.get(k).getCoord(0))
                        nodalValues[k] = basis.eval(lid, iid, x) * \
                            basis.eval(ljd, ijd, x)
                    v = hierarchize(ngrid, nodalValues)

                    # weight the interpolant with the marginal density
                    def f(x, y, d=d, T=T):
                        xp = T.unitToProbabilistic(x)
                        return float(y * U[d].pdf(xp))

                    g, w, _ = discretize(ngrid, v, f, refnums=0, level=5,
                                         useDiscreteL2Error=False)
                    # quadrature on the unit interval, rescaled to [lb, ub]
                    s[d] = doQuadrature(g, w) * (ub - lb)

            A.set(i, j, float(np.prod(s)))
            if gs.seq(gpi) == j:
                b[i] = A.get(i, j)
    return A, b
def __doMarginalize(grid, alpha, dd, measure=None):
    """Marginalize dimension dd out of the sparse grid function given by
    (grid, alpha), either with respect to the uniform measure or the
    provided (distribution, transformation) pair.

    @param grid: Grid, sparse grid of dimension >= 2
    @param alpha: coefficient vector
    @param dd: int, dimension to integrate out
    @param measure: optional tuple (distributions, transformations)
    @return: tuple(marginal grid, marginal coefficients, error)
    @raise AttributeError: if the grid is 1d or dd is out of range
    """
    gs = grid.getStorage()
    dim = gs.dim()

    if dim < 2:
        raise AttributeError("The grid has to be at least of dimension 2")
    if dd >= dim:
        raise AttributeError("The grid has only %i dimensions, so I can't \
integrate over %i" % (dim, dd))

    # build the (dim - 1)-dimensional grid of the same type
    n_dim = dim - 1
    n_grid = createGrid(grid, n_dim)
    n_gs = n_grid.getStorage()

    # project every grid point: drop dimension dd, shift the rest down
    n_gp = HashGridIndex(n_dim)
    for i in xrange(gs.size()):
        gp = gs.get(i)
        for d in range(dim):
            if d == dd:
                # omit marginalization direction
                continue
            elif d < dd:
                n_gp.set(d, gp.getLevel(d), gp.getIndex(d))
            else:
                n_gp.set(d - 1, gp.getLevel(d), gp.getIndex(d))

        if not n_gs.has_key(n_gp):
            n_gs.insert(n_gp)

    n_gs.recalcLeafProperty()

    # marginal coefficient vector, initially zero
    n_alpha = DataVector(n_gs.size())
    n_alpha.setAll(0.0)

    # accumulate alpha_i * (1d integral in direction dd) per projection
    for i in xrange(gs.size()):
        gp = gs.get(i)
        for d in range(dim):
            if d == dd:
                dd_level = gp.getLevel(d)
                dd_index = gp.getIndex(d)
            elif d < dd:
                n_gp.set(d, gp.getLevel(d), gp.getIndex(d))
            else:
                n_gp.set(d - 1, gp.getLevel(d), gp.getIndex(d))

        if not n_gs.has_key(n_gp):
            raise Exception("This should not happen!")

        # 1d integral of the basis function in direction dd
        if measure is None:
            q, err = getIntegral(grid, dd_level, dd_index), 0.
        else:
            dist, trans = measure[0][dd], measure[1][dd]
            lf = LinearGaussQuadratureStrategy([dist], [trans])
            basis = getBasis(grid)
            gpdd = HashGridIndex(1)
            gpdd.set(0, dd_level, dd_index)
            q, err = lf.computeLinearFormByList([gpdd], basis)
            q = q[0]

        j = n_gs.seq(n_gp)
        n_alpha[j] += alpha[i] * q

    return n_grid, n_alpha, err
def computeBilinearForm(grid, U):
    r"""Compute bilinear form (A)_ij = \int phi_i phi_j dU(x) on measure
    U, which is in this case supposed to be a lebesgue measure.

    Each 1d factor phi_i * phi_j (a piecewise polynomial of degree 2) is
    interpolated exactly on an auxiliary 1d poly-boundary grid, weighted
    with the density and integrated by sparse grid quadrature.

    @param grid: Grid, sparse grid
    @param U: list of distributions, Lebeasgue measure
    @return: DataMatrix
    """
    gs = grid.getStorage()
    basis = getBasis(grid)

    # auxiliary 1d grid on which the degree-2 products are interpolated
    ngrid = Grid.createPolyBoundaryGrid(1, 2)
    ngrid.createGridGenerator().regular(gs.getMaxLevel() + 1)
    ngs = ngrid.getStorage()
    nodalValues = DataVector(ngs.size())

    # level holds 2^l, index holds i for every grid point and dimension
    level = DataMatrix(gs.size(), gs.dim())
    index = DataMatrix(gs.size(), gs.dim())
    gs.getLevelIndexArraysForEval(level, index)

    A = DataMatrix(gs.size(), gs.size())
    s = np.ndarray(gs.dim(), dtype='float')

    for i in xrange(gs.size()):
        gpi = gs.get(i)
        # symmetric matrix: compute the upper triangle only
        for j in xrange(i, gs.size()):
            gpj = gs.get(j)
            for d in xrange(gs.dim()):
                lid, iid = level.get(i, d), index.get(i, d)
                ljd, ijd = level.get(j, d), index.get(j, d)

                # intersection of the two 1d supports
                lb = max([(iid - 1) / lid, (ijd - 1) / ljd])
                ub = min([(iid + 1) / lid, (ijd + 1) / ljd])

                if lid == ljd and iid != ijd:
                    # same level, different index: disjoint supports
                    s[d] = 0.
                elif lid != ljd and lb >= ub:
                    # different levels but no overlap
                    s[d] = 0.
                else:
                    # interpolate the clipped 1d product on the
                    # auxiliary grid ...
                    lid, iid = gpi.getLevel(d), int(iid)
                    ljd, ijd = gpj.getLevel(d), int(ijd)
                    for k in xrange(ngs.size()):
                        x = ngs.get(k).getCoord(0)
                        nodalValues[k] = max(0, basis.eval(lid, iid, x)) * \
                            max(0, basis.eval(ljd, ijd, x))
                    # ... by hierarchization
                    v = hierarchize(ngrid, nodalValues)

                    # weight the interpolant with the marginal density
                    def f(x, y, d=d):
                        return float(y * U[d].pdf(x[0]))

                    g, w, _ = discretize(ngrid, v, f, refnums=0)
                    # integrate the weighted interpolant
                    s[d] = doQuadrature(g, w)

            A.set(i, j, float(np.prod(s)))
            A.set(j, i, A.get(i, j))
    return A
def __doMarginalize(grid, alpha, linearForm, dd, measure=None):
    """Marginalize dimension dd out of the sparse grid function given by
    (grid, alpha), either with respect to the uniform measure or the
    provided (distribution, transformation) pair.

    @param grid: Grid, sparse grid of dimension >= 2
    @param alpha: coefficient vector
    @param linearForm: linear form used for the weighted 1d integrals
    @param dd: int, dimension to integrate out
    @param measure: optional tuple (distributions, transformations)
    @return: tuple(marginal grid, marginal coefficients, error)
    @raise AttributeError: if the grid is 1d or dd is out of range
    """
    gs = grid.getStorage()
    dim = gs.getDimension()

    if dim < 2:
        raise AttributeError("The grid has to be at least of dimension 2")
    if dd >= dim:
        raise AttributeError("The grid has only %i dimensions, so I can't \
integrate over %i" % (dim, dd))

    # build the (dim - 1)-dimensional grid of the same type
    n_dim = dim - 1
    n_grid = createGrid(grid, n_dim)
    n_gs = n_grid.getStorage()

    # project every grid point: drop dimension dd, shift the rest down
    n_gp = HashGridPoint(n_dim)
    for i in range(gs.getSize()):
        gp = gs.getPoint(i)
        for d in range(dim):
            if d == dd:
                # omit marginalization direction
                continue
            elif d < dd:
                n_gp.set(d, gp.getLevel(d), gp.getIndex(d))
            else:
                n_gp.set(d - 1, gp.getLevel(d), gp.getIndex(d))

        if not n_gs.isContaining(n_gp):
            n_gs.insert(n_gp)

    n_gs.recalcLeafProperty()

    # marginal coefficient vector, initially zero
    n_alpha = np.zeros(n_gs.getSize())
    basis = getBasis(grid)

    # accumulate alpha_i * (1d integral in direction dd) per projection
    for i in range(gs.getSize()):
        gp = gs.getPoint(i)
        for d in range(dim):
            if d == dd:
                dd_level = gp.getLevel(d)
                dd_index = gp.getIndex(d)
            elif d < dd:
                n_gp.set(d, gp.getLevel(d), gp.getIndex(d))
            else:
                n_gp.set(d - 1, gp.getLevel(d), gp.getIndex(d))

        if not n_gs.isContaining(n_gp):
            raise Exception("This should not happen!")

        # 1d integral of the basis function in direction dd
        if measure is None:
            q, err = getIntegral(grid, dd_level, dd_index), 0.
        else:
            dist, trans = measure[0][dd], measure[1][dd]
            linearForm.setDistributionAndTransformation([dist], [trans])
            gpdd = HashGridPoint(1)
            gpdd.set(0, dd_level, dd_index)
            q, err = linearForm.computeLinearFormByList(gs, [gpdd], basis)
            # rescale the weighted integral to the probabilistic domain
            q = q[0] * trans.vol()
            err *= trans.vol()

        j = n_gs.getSequenceNumber(n_gp)
        n_alpha[j] += alpha[i] * q

    return n_grid, n_alpha, err
def computeBF(grid, U, admissibleSet):
    r"""Compute bilinear form (A)_ij = \int phi_i phi_j dU(x) on measure
    U, which is in this case supposed to be a lebesgue measure, for the
    admissible-set points against all grid points.

    Each 1d factor phi_i * phi_j (a piecewise polynomial of degree 2) is
    interpolated on an auxiliary 1d poly-boundary grid over the overlap
    of the supports, weighted with the density and integrated by sparse
    grid quadrature.

    @param grid: Grid, sparse grid
    @param U: list of distributions, Lebeasgue measure
    @param admissibleSet: AdmissibleSet
    @return: tuple(DataMatrix A, DataVector b) where b_i is the entry
             A_i,seq(gpi) for admissible points already in the grid
    """
    gs = grid.getStorage()
    basis = getBasis(grid)

    # auxiliary 1d grid on which the degree-2 products are interpolated
    ngrid = Grid.createPolyBoundaryGrid(1, 2)
    ngrid.getGenerator().regular(2)
    ngs = ngrid.getStorage()
    nodalValues = DataVector(ngs.size())

    A = DataMatrix(admissibleSet.getSize(), gs.size())
    b = DataVector(admissibleSet.getSize())
    s = np.ndarray(gs.getDimension(), dtype='float')

    for i, gpi in enumerate(admissibleSet.values()):
        for j in range(gs.size()):
            gpj = gs.getPoint(j)
            for d in range(gs.getDimension()):
                lid, iid = gpi.getLevel(d), gpi.getIndex(d)
                ljd, ijd = gpj.getLevel(d), gpj.getIndex(d)

                # intersection of the supports of both basis functions
                lb = max([(iid - 1) * 2 ** -lid, (ijd - 1) * 2 ** -ljd])
                ub = min([(iid + 1) * 2 ** -lid, (ijd + 1) * 2 ** -ljd])

                if lid == ljd and iid != ijd:
                    # same level, different index: disjoint supports
                    s[d] = 0.
                elif lid != ljd and lb >= ub:
                    # different levels but no overlap
                    s[d] = 0.
                else:
                    # map the unit interval onto the overlap [lb, ub]
                    T = LinearTransformation(lb, ub)

                    # interpolate the 1d product on the auxiliary grid
                    for k in range(ngs.size()):
                        x = ngs.getCoordinate(ngs.getPoint(k), 0)
                        x = T.unitToProbabilistic(x)
                        nodalValues[k] = basis.eval(lid, iid, x) * \
                            basis.eval(ljd, ijd, x)
                    v = hierarchize(ngrid, nodalValues)

                    # weight the interpolant with the marginal density
                    def f(x, y, d=d, T=T):
                        xp = T.unitToProbabilistic(x)
                        return float(y * U[d].pdf(xp))

                    g, w, _ = discretize(ngrid, v, f, refnums=0, level=5,
                                         useDiscreteL2Error=False)
                    # quadrature on the unit interval, rescaled to [lb, ub]
                    s[d] = doQuadrature(g, w) * (ub - lb)

            A.set(i, j, float(np.prod(s)))
            if gs.getSequenceNumber(gpi) == j:
                b[i] = A.get(i, j)
    return A, b