def writeSurplusesLevelWise(self, filename):
    """Write the level-wise hierarchical surpluses to ARFF files.

    One file "<filename>.t<t>.surpluses.arff" is written per available
    time step t.  Column 0 of each row holds the level (sum), the
    remaining columns hold the surplus value for each knowledge type.

    @param filename: base path/prefix for the generated ARFF files
    """
    # locate all knowledge types available
    dtypes = self.__uqManager.getKnowledgeTypes()
    names = ['level']
    for dtype in dtypes:
        names.append("surplus_%s" % KnowledgeTypes.toString(dtype))
    ts = self.__knowledge.getAvailableTimeSteps()
    for t in ts:
        # collect all the surpluses classifying them by level sum
        data = {}
        n = 0
        for dtype in dtypes:
            data[dtype] = self.computeSurplusesLevelWise(t, dtype)
            # NOTE(review): n is overwritten each iteration; this assumes
            # every knowledge type yields the same total number of
            # surpluses — TODO confirm
            n = sum([len(values) for values in list(data[dtype].values())])
        A = DataMatrix(n, len(names))
        # add them to a matrix structure: one column per knowledge type,
        # rows grouped by level
        for i, dtype in enumerate(dtypes):
            k = 0
            for level, surpluses in list(data[dtype].items()):
                for j, surplus in enumerate(surpluses):
                    A.set(k + j, i + 1, surplus)
                    A.set(k + j, 0, level)
                k += len(surpluses)
        writeDataARFF({'filename': "%s.t%s.surpluses.arff" % (filename, t),
                       'data': A,
                       'names': names})
def writeSurplusesLevelWise(self, filename):
    """Write the level-wise hierarchical surpluses to ARFF files.

    One file "<filename>.t<t>.surpluses.arff" is written per available
    time step t.  Column 0 of each row holds the level (sum), the
    remaining columns hold the surplus value for each knowledge type.

    @param filename: base path/prefix for the generated ARFF files
    """
    # locate all knowledge types available
    dtypes = self.__learner.getKnowledgeTypes()
    names = ['level']
    for dtype in dtypes:
        names.append("surplus_%s" % KnowledgeTypes.toString(dtype))
    ts = self.__knowledge.getAvailableTimeSteps()
    for t in ts:
        # collect all the surpluses classifying them by level sum
        data = {}
        n = 0
        for dtype in dtypes:
            data[dtype] = self.computeSurplusesLevelWise(t, dtype)
            # NOTE(review): n is overwritten each iteration; this assumes
            # every knowledge type yields the same total number of
            # surpluses — TODO confirm
            n = sum([len(values) for values in data[dtype].values()])
        A = DataMatrix(n, len(names))
        # add them to a matrix structure: one column per knowledge type,
        # rows grouped by level
        for i, dtype in enumerate(dtypes):
            k = 0
            for level, surpluses in data[dtype].items():
                for j, surplus in enumerate(surpluses):
                    A.set(k + j, i + 1, surplus)
                    A.set(k + j, 0, level)
                k += len(surpluses)
        writeDataARFF({'filename': "%s.t%s.surpluses.arff" % (filename, t),
                       'data': A,
                       'names': names})
def readReferenceMatrix(self, storage, filename):
    """Load a reference matrix from a (possibly gzipped) text file.

    Falls back to looking under 'tests/' when the file cannot be opened
    directly.  Asserts that the parsed matrix has one row per grid point
    in ``storage`` and that the first row has the same number of entries.

    @param storage: grid storage whose size fixes the expected dimensions
    @param filename: path of the matrix file
    @return: DataMatrix holding the parsed values
    """
    from pysgpp import DataVector, DataMatrix
    # read reference matrix, falling back to the tests/ directory
    try:
        handle = tools.gzOpen(filename, 'r')
    except IOError as e:
        handle = None
    if not handle:
        handle = tools.gzOpen('tests/' + filename, 'r')
    content = handle.read().strip()
    handle.close()
    # one row per line, whitespace-separated entries
    rows = [line.strip().split(None) for line in content.split('\n')]
    # right number of entries?
    self.assertEqual(storage.getSize(), len(rows))
    self.assertEqual(storage.getSize(), len(rows[0]))
    n_rows, n_cols = len(rows), len(rows[0])
    m_ref = DataMatrix(n_rows, n_cols)
    for r in range(n_rows):
        for c in range(n_cols):
            m_ref.set(r, c, float(rows[r][c]))
    return m_ref
def computePiecewiseConstantBF(grid, U, admissibleSet):
    """Piecewise-constant approximation of the bilinear form, restricted
    to the rows belonging to the admissible set.

    The exact L2 dot-product matrix of the grid is computed and each
    entry is scaled by the pdf of U evaluated at the coordinates of the
    row's grid point.

    @param grid: Grid
    @param U: distribution providing a pdf method
    @param admissibleSet: admissible set of grid points (rows of B)
    @return: tuple (B, b) — B is |admissibleSet| x |grid| DataMatrix,
             b holds the diagonal entries (where row point == column point)
    """
    # create bilinear form of the grid
    gs = grid.getStorage()
    A = DataMatrix(gs.size(), gs.size())
    createOperationLTwoDotExplicit(A, grid)
    # multiply the entries with the pdf at the center of the support
    p = DataVector(gs.getDimension())
    q = DataVector(gs.getDimension())
    B = DataMatrix(admissibleSet.getSize(), gs.size())
    b = DataVector(admissibleSet.getSize())
    for k, gpi in enumerate(admissibleSet.values()):
        i = gs.getSequenceNumber(gpi)
        gs.getCoordinates(gpi, p)
        for j in range(gs.size()):
            # NOTE(review): q is filled here but the active code below
            # only uses p (pdf at the row point) — confirm intent
            gs.getCoordinates(gs.getPoint(j), q)
            y = float(A.get(i, j) * U.pdf(p))
            B.set(k, j, y)
            if i == j:
                b[k] = y
    return B, b
def testOperationB(self):
    """Check OperationB mult/multTranspose on a 1d linear boundary grid.

    Uses a regular level-2 grid, a single evaluation point x = 0.25 and
    compares the resulting basis-function values (and the transposed
    evaluation) against hard-coded reference values.
    """
    from pysgpp import Grid, DataVector, DataMatrix
    factory = Grid.createLinearBoundaryGrid(1)
    gen = factory.createGridGenerator()
    gen.regular(2)
    alpha = DataVector(factory.getStorage().size())
    p = DataMatrix(1, 1)
    beta = DataVector(1)

    # mult: accumulate beta-weighted basis evaluations at x=0.25 into alpha
    alpha.setAll(0.0)
    p.set(0, 0, 0.25)
    beta[0] = 1.0
    opb = factory.createOperationB()
    opb.mult(beta, p, alpha)
    self.failUnlessAlmostEqual(alpha[0], 0.75)
    self.failUnlessAlmostEqual(alpha[1], 0.25)
    self.failUnlessAlmostEqual(alpha[2], 0.5)
    self.failUnlessAlmostEqual(alpha[3], 1.0)
    self.failUnlessAlmostEqual(alpha[4], 0.0)

    # multTranspose: evaluate the function with a single active coefficient
    alpha.setAll(0.0)
    alpha[2] = 1.0
    p.set(0, 0, 0.25)
    beta[0] = 0.0
    opb.multTranspose(alpha, p, beta)
    self.failUnlessAlmostEqual(beta[0], 0.5)
def plotGrid(self, learner, suffix):
    """Render the learner's function on a 30x30 grid over [0,1]^2 and
    save it as a 3d wireframe PNG.

    The output file is named "grid3d_<suffix>_<iteration>.png".

    @param learner: learner object providing applyData(DataMatrix)
    @param suffix: string used in the output filename
    """
    from mpl_toolkits.mplot3d.axes3d import Axes3D
    import matplotlib.pyplot as plt
    # plt.ioff()
    xs = np.linspace(0, 1, 30)
    ys = np.linspace(0, 1, 30)
    X, Y = np.meshgrid(xs, ys)
    Z = zeros(np.shape(X))
    # flatten the mesh into one (row, 2) matrix of evaluation points
    input = DataMatrix(np.shape(Z)[0] * np.shape(Z)[1], 2)
    r = 0
    for i in range(np.shape(Z)[0]):
        for j in range(np.shape(Z)[1]):
            input.set(r, 0, X[i, j])
            input.set(r, 1, Y[i, j])
            r += 1
    result = learner.applyData(input)
    # un-flatten the results back onto the mesh
    r = 0
    for i in range(np.shape(Z)[0]):
        for j in range(np.shape(Z)[1]):
            Z[i, j] = result[r]
            r += 1
    fig = plt.figure()
    ax = Axes3D(fig)
    ax.plot_wireframe(X, Y, Z)
    # plt.draw()
    plt.savefig("grid3d_%s_%i.png" % (suffix, learner.iteration))
    fig.clf()
    plt.close(plt.gcf())
def computeBilinearForm(self, grid): """ Compute bilinear form for the current grid @param grid: Grid @return DataMatrix """ # create bilinear form of the grid gs = grid.getStorage() A = DataMatrix(gs.getSize(), gs.getSize()) A.setAll(0.) createOperationLTwoDotExplicit(A, grid) # multiply the entries with the pdf at the center of the support p = DataVector(gs.getDimension()) q = DataVector(gs.getDimension()) for i in range(gs.getSize()): gpi = gs.getPoint(i) gs.getCoordinates(gpi, p) for j in range(gs.getSize()): gpj = gs.getPoint(j) gs.getCoordinates(gpj, q) y = float(A.get(i, j) * self._U.pdf(p)) A.set(i, j, y) A.set(j, i, y) self._map[self.getKey([gpi, gpj])] = A.get(i, j) return A
def computePiecewiseConstantBF(grid, U, admissibleSet):
    """Piecewise-constant approximation of the bilinear form, restricted
    to the rows belonging to the admissible set (Python 2 variant).

    @param grid: Grid
    @param U: distribution providing a pdf method
    @param admissibleSet: admissible set of grid points (rows of B)
    @return: tuple (B, b) — B is |admissibleSet| x |grid| DataMatrix,
             b holds the diagonal entries (where row point == column point)
    """
    # create bilinear form of the grid
    gs = grid.getStorage()
    A = DataMatrix(gs.size(), gs.size())
    createOperationLTwoDotExplicit(A, grid)
    # multiply the entries with the pdf at the center of the support
    p = DataVector(gs.dim())
    q = DataVector(gs.dim())
    B = DataMatrix(admissibleSet.getSize(), gs.size())
    b = DataVector(admissibleSet.getSize())
    for k, gpi in enumerate(admissibleSet.values()):
        i = gs.seq(gpi)
        gpi.getCoords(p)
        for j in xrange(gs.size()):
            # NOTE(review): q is filled here but the active code below
            # only uses p (pdf at the row point) — confirm intent
            gs.get(j).getCoords(q)
            y = float(A.get(i, j) * U.pdf(p))
            B.set(k, j, y)
            if i == j:
                b[k] = y
    return B, b
def testOperationB(self):
    """Check OperationB mult/multTranspose on a 1d linear boundary grid.

    Uses a regular level-2 grid, a single evaluation point x = 0.25 and
    compares the resulting basis-function values (and the transposed
    evaluation) against hard-coded reference values.
    """
    from pysgpp import Grid, DataVector, DataMatrix
    factory = Grid.createLinearBoundaryGrid(1)
    gen = factory.createGridGenerator()
    gen.regular(2)
    alpha = DataVector(factory.getStorage().size())
    p = DataMatrix(1,1)
    beta = DataVector(1)

    # mult: accumulate beta-weighted basis evaluations at x=0.25 into alpha
    alpha.setAll(0.0)
    p.set(0,0,0.25)
    beta[0] = 1.0
    opb = factory.createOperationB()
    opb.mult(beta, p, alpha)
    self.failUnlessAlmostEqual(alpha[0], 0.75)
    self.failUnlessAlmostEqual(alpha[1], 0.25)
    self.failUnlessAlmostEqual(alpha[2], 0.5)
    self.failUnlessAlmostEqual(alpha[3], 1.0)
    self.failUnlessAlmostEqual(alpha[4], 0.0)

    # multTranspose: evaluate the function with a single active coefficient
    alpha.setAll(0.0)
    alpha[2] = 1.0
    p.set(0,0, 0.25)
    beta[0] = 0.0
    opb.multTranspose(alpha, p, beta)
    self.failUnlessAlmostEqual(beta[0], 0.5)
def computeBilinearForm(self, grid):
    """
    Compute bilinear form for the current grid

    The exact L2 dot-product matrix is computed first, then every entry
    is rescaled in place by the pdf of self._U at the coordinates of the
    row point; results are also cached in self._map keyed by point pair.

    @param grid: Grid
    @return DataMatrix
    """
    # create bilinear form of the grid.
    # BUG FIX: the matrix was previously constructed twice in a row
    # (gs/A/createOperationLTwoDotExplicit duplicated verbatim); the
    # redundant second construction has been removed.
    gs = grid.getStorage()
    A = DataMatrix(gs.size(), gs.size())
    A.setAll(0.)
    createOperationLTwoDotExplicit(A, grid)
    # multiply the entries with the pdf at the center of the support
    p = DataVector(gs.dim())
    q = DataVector(gs.dim())
    for i in xrange(gs.size()):
        gpi = gs.get(i)
        gpi.getCoords(p)
        for j in xrange(gs.size()):
            gpj = gs.get(j)
            # NOTE(review): q is filled but only p is used below; the
            # in-place rescale also reads entries the symmetric write
            # A.set(j, i, y) may already have scaled — confirm intended
            gpj.getCoords(q)
            y = float(A.get(i, j) * self._U.pdf(p))
            A.set(i, j, y)
            A.set(j, i, y)
            self._map[self.getKey(gpi, gpj)] = A.get(i, j)
    return A
def computeTrilinearFormByList(self,
                               gpsk, basisk, alphak,
                               gpsi, basisi,
                               gpsj, basisj):
    """
    Compute trilinear form for two lists of grid points
    @param gpsk: list of HashGridIndex
    @param basisk: SG++ basis for grid indices gpsk
    @param alphak: coefficients for kth grid
    @param gpsi: list of HashGridIndex
    @param basisi: SG++ basis for grid indices gpsi
    @param gpsj: list of HashGridIndex
    @param basisj: SG++ basis for grid indices gpsj
    @return: DataMatrix of size len(gpsi) x len(gpsj), plus the
             accumulated quadrature error over all entries
    """
    print "# evals: %i^2 * %i = %i" % (len(gpsi), len(gpsk), len(gpsi)**2 * len(gpsk))
    A = DataMatrix(len(gpsi), len(gpsj))
    err = 0.
    # run over all rows
    for i, gpi in enumerate(gpsi):
        # run over all columns
        for j, gpj in enumerate(gpsj):
            # run over all gpks
            b, erri = self.computeTrilinearFormByRow(
                gpsk, basisk, gpi, basisi, gpj, basisj)
            # get the overall contribution in the current dimension
            # (weighted sum of the row results with the k-grid coefficients)
            value = alphak.dotProduct(b)
            A.set(i, j, value)
            # error statistics
            err += erri
    return A, err
def computeTrilinearFormByList(self,
                               gpsk, basisk, alphak,
                               gpsi, basisi,
                               gpsj, basisj):
    """
    Compute trilinear form for two lists of grid points
    @param gpsk: list of HashGridIndex
    @param basisk: SG++ basis for grid indices gpsk
    @param alphak: coefficients for kth grid
    @param gpsi: list of HashGridIndex
    @param basisi: SG++ basis for grid indices gpsi
    @param gpsj: list of HashGridIndex
    @param basisj: SG++ basis for grid indices gpsj
    @return: DataMatrix of size len(gpsi) x len(gpsj), plus the
             accumulated quadrature error over all entries
    """
    print "# evals: %i^2 * %i = %i" % (len(gpsi), len(gpsk), len(gpsi) ** 2 * len(gpsk))
    A = DataMatrix(len(gpsi), len(gpsj))
    err = 0.
    # run over all rows
    for i, gpi in enumerate(gpsi):
        # run over all columns
        for j, gpj in enumerate(gpsj):
            # run over all gpks
            b, erri = self.computeTrilinearFormByRow(gpsk, basisk,
                                                     gpi, basisi,
                                                     gpj, basisj)
            # get the overall contribution in the current dimension
            # (weighted sum of the row results with the k-grid coefficients)
            value = alphak.dotProduct(b)
            A.set(i, j, value)
            # error statistics
            err += erri
    return A, err
def plotGrid(self, learner, suffix):
    """Render the learner's function on a 30x30 grid over [0,1]^2 and
    save it as a 3d wireframe PNG (Python 2 variant).

    The output file is named "grid3d_<suffix>_<iteration>.png".

    @param learner: learner object providing applyData(DataMatrix)
    @param suffix: string used in the output filename
    """
    from mpl_toolkits.mplot3d.axes3d import Axes3D
    import matplotlib.pyplot as plt
    xs = np.linspace(0, 1, 30)
    ys = np.linspace(0, 1, 30)
    X, Y = np.meshgrid(xs, ys)
    Z = zeros(np.shape(X))
    # flatten the mesh into one (row, 2) matrix of evaluation points
    input = DataMatrix(np.shape(Z)[0]*np.shape(Z)[1], 2)
    r = 0
    for i in xrange(np.shape(Z)[0]):
        for j in xrange(np.shape(Z)[1]):
            input.set(r, 0, X[i,j])
            input.set(r, 1, Y[i,j])
            r += 1
    result = learner.applyData(input)
    # un-flatten the results back onto the mesh
    r = 0
    for i in xrange(np.shape(Z)[0]):
        for j in xrange(np.shape(Z)[1]):
            Z[i,j] = result[r]
            r += 1
    fig = plt.figure()
    ax = Axes3D(fig)
    ax.plot_wireframe(X,Y,Z)
    # plt.draw()
    plt.savefig("grid3d_%s_%i.png" % (suffix, learner.iteration))
    fig.clf()
    plt.close(plt.gcf())
def setUp(self):
    """Build the fixture: 9 one-dimensional points 0..8 with labels -1
    for the first half and +1 for the rest, wrapped in a DataContainer
    and a StratifiedFoldingPolicy with 4 folds."""
    self.size = 9
    self.level = 4
    points = DataMatrix(self.size, 1)
    values = DataVector(self.size)
    for i in xrange(self.size):
        points.set(i, 0, i)
        # first half negative class, second half positive class
        values[i] = -1 if i < self.size/2 else 1
    self.dataContainer = DataContainer(points=points, values=values)
    self.policy = StratifiedFoldingPolicy(self.dataContainer, self.level)
def setUp(self):
    """Build the fixture: 9 one-dimensional points 0..8 with labels -1
    for the first half and +1 for the rest, wrapped in a DataContainer
    and a StratifiedFoldingPolicy with 4 folds."""
    self.size = 9
    self.level = 4
    points = DataMatrix(self.size, 1)
    values = DataVector(self.size)
    half = self.size // 2
    for idx in range(self.size):
        points.set(idx, 0, idx)
        # first half negative class, second half positive class
        values[idx] = -1 if idx < half else 1
    self.dataContainer = DataContainer(points=points, values=values)
    self.policy = StratifiedFoldingPolicy(self.dataContainer, self.level)
def setUp(self):
    """Build the fixture: 11 one-dimensional points 0..10 with identity
    labels, wrapped in a DataContainer and a SequentialFoldingPolicy
    with 10 folds."""
    self.size = 11
    self.level = 10
    points = DataMatrix(self.size, 1)
    values = DataVector(self.size)
    for i in xrange(self.size):
        points.set(i, 0, i)
        values[i] = i
    self.dataContainer = DataContainer(points=points, values=values)
    self.policy = SequentialFoldingPolicy(self.dataContainer, self.level)
def setUp(self):
    """Build the fixture: 11 one-dimensional points 0..10 with identity
    labels, wrapped in a DataContainer and a SequentialFoldingPolicy
    with 10 folds."""
    self.size = 11
    self.level = 10
    points = DataMatrix(self.size, 1)
    values = DataVector(self.size)
    for idx in range(self.size):
        points.set(idx, 0, idx)
        # label equals the point's own index
        values[idx] = idx
    self.dataContainer = DataContainer(points=points, values=values)
    self.policy = SequentialFoldingPolicy(self.dataContainer, self.level)
def buildTrainingVector(data):
    """Convert a column-major data dict into a row-major DataMatrix.

    @param data: dict whose "data" entry is a list of columns (one list
                 per dimension; all columns assumed equally long — the
                 length of the first column is used)
    @return: DataMatrix with one row per data point
    """
    from pysgpp import DataMatrix
    dim = len(data["data"])
    training = DataMatrix(len(data["data"][0]), dim)
    # i iterates over the data points, d over the dimension of one data point
    for i in xrange(len(data["data"][0])):
        for d in xrange(dim):
            training.set(i, d, data["data"][d][i])
    return training
def buildTrainingVector(data):
    """Convert a column-major data dict into a row-major DataMatrix.

    @param data: dict whose "data" entry is a list of columns (one list
                 per dimension; all columns assumed equally long — the
                 length of the first column is used)
    @return: DataMatrix with one row per data point
    """
    from pysgpp import DataMatrix
    columns = data["data"]
    dim = len(columns)
    num_points = len(columns[0])
    training = DataMatrix(num_points, dim)
    # copy column-wise storage into the row-major matrix
    for row in range(num_points):
        for col in range(dim):
            training.set(row, col, columns[col][row])
    return training
def setUp(self):
    """Build the fixture: 11 one-dimensional points 0..10 with identity
    labels, wrapped in a DataContainer and a RandomFoldingPolicy with
    10 folds and a fixed seed for reproducibility."""
    self.size = 11
    self.level = 10
    self.seed = 42
    points = DataMatrix(self.size, 1)
    values = DataVector(self.size)
    for i in xrange(self.size):
        points.set(i, 0, i)
        values[i] = i
    self.dataContainer = DataContainer(points=points, values=values)
    self.policy = RandomFoldingPolicy(self.dataContainer, self.level,
                                      self.seed)
def computeBilinearFormQuad(grid, U):
    """Compute the bilinear form entry-wise with scipy 1d quadrature.

    For every pair of grid points the per-dimension integral of
    phi_i * phi_j * pdf is evaluated with scipy's quad over the common
    support; the product over dimensions fills the symmetric matrix.

    @param grid: Grid
    @param U: list of 1d distributions (one per dimension)
    @return: DataMatrix of size gs.size() x gs.size()
    """
    gs = grid.getStorage()
    basis = getBasis(grid)
    A = DataMatrix(gs.size(), gs.size())

    # level/index in the 2**l / i float representation used for support
    # computations below
    level = DataMatrix(gs.size(), gs.dim())
    index = DataMatrix(gs.size(), gs.dim())
    gs.getLevelIndexArraysForEval(level, index)

    s = np.ndarray(gs.dim(), dtype='float')
    # run over all rows
    for i in xrange(gs.size()):
        gpi = gs.get(i)
        # run over all columns (upper triangle; mirrored below)
        for j in xrange(i, gs.size()):
            gpj = gs.get(j)
            for d in xrange(gs.dim()):
                # get level index
                lid, iid = level.get(i, d), index.get(i, d)
                ljd, ijd = level.get(j, d), index.get(j, d)
                # compute left and right boundary of the support of both
                # basis functions
                lb = max([(iid - 1) / lid, (ijd - 1) / ljd])
                ub = min([(iid + 1) / lid, (ijd + 1) / ljd])
                # same level, different index => disjoint interiors
                if lid == ljd and iid != ijd:
                    s[d] = 0.
                # the support does not overlap
                elif lid != ljd and lb >= ub:
                    s[d] = 0.
                else:
                    # switch back to integer level/index for basis.eval
                    lid, iid = gpi.getLevel(d), int(iid)
                    ljd, ijd = gpj.getLevel(d), int(ijd)
                    # ----------------------------------------------------
                    # use scipy for integration
                    def f(x):
                        return basis.eval(lid, iid, x) * \
                            basis.eval(ljd, ijd, x) * \
                            U[d].pdf(x)
                    s[d], _ = quad(f, lb, ub, epsabs=1e-8)
                    # ----------------------------------------------------
            A.set(i, j, float(np.prod(s)))
            A.set(j, i, A.get(i, j))
    return A
def computeBFQuad(grid, U, admissibleSet, n=100):
    """
    Compute the bilinear form restricted to the admissible set, using
    scipy 1d quadrature per dimension.

    @param grid: Grid
    @param U: list of distributions
    @param admissibleSet: AdmissibleSet
    @param n: int, number of MC samples
    @return: tuple (A, b) — A is |admissibleSet| x |grid| DataMatrix,
             b holds the diagonal entries
    """
    # NOTE(review): parameter n is currently unused in this body
    gs = grid.getStorage()
    basis = getBasis(grid)
    A = DataMatrix(admissibleSet.getSize(), gs.size())
    b = DataVector(admissibleSet.getSize())
    s = np.ndarray(gs.dim(), dtype='float')
    # run over all rows
    for i, gpi in enumerate(admissibleSet.values()):
        # run over all columns
        for j in xrange(gs.size()):
            gpj = gs.get(j)
            for d in xrange(gs.dim()):
                # get level index
                lid, iid = gpi.getLevel(d), gpi.getIndex(d)
                ljd, ijd = gpj.getLevel(d), gpj.getIndex(d)
                # compute left and right boundary of the support of both
                # basis functions
                xlow = max([(iid - 1) * 2 ** -lid, (ijd - 1) * 2 ** -ljd])
                xhigh = min([(iid + 1) * 2 ** -lid, (ijd + 1) * 2 ** -ljd])
                # same level, different index => disjoint interiors
                if lid == ljd and iid != ijd:
                    s[d] = 0.
                # the support does not overlap
                elif lid != ljd and xlow >= xhigh:
                    s[d] = 0.
                else:
                    # ----------------------------------------------------
                    # use scipy for integration
                    def f(x):
                        return basis.eval(lid, iid, x) * \
                            basis.eval(ljd, ijd, x) * \
                            U[d].pdf(x)
                    s[d], _ = quad(f, xlow, xhigh, epsabs=1e-8)
                    # ----------------------------------------------------
            A.set(i, j, float(np.prod(s)))
            # store the diagonal entry (row point equals column point)
            if gs.seq(gpi) == j:
                b[i] = A.get(i, j)
    return A, b
def computeBFQuad(grid, U, admissibleSet, n=100):
    """
    Compute the bilinear form restricted to the admissible set, using
    scipy 1d quadrature per dimension.

    @param grid: Grid
    @param U: list of distributions
    @param admissibleSet: AdmissibleSet
    @param n: int, number of MC samples
    @return: tuple (A, b) — A is |admissibleSet| x |grid| DataMatrix,
             b holds the diagonal entries
    """
    # NOTE(review): parameter n is currently unused in this body
    gs = grid.getStorage()
    basis = getBasis(grid)
    A = DataMatrix(admissibleSet.getSize(), gs.size())
    b = DataVector(admissibleSet.getSize())
    s = np.ndarray(gs.getDimension(), dtype='float')
    # run over all rows
    for i, gpi in enumerate(admissibleSet.values()):
        # run over all columns
        for j in range(gs.size()):
            gpj = gs.getPoint(j)
            for d in range(gs.getDimension()):
                # get level index
                lid, iid = gpi.getLevel(d), gpi.getIndex(d)
                ljd, ijd = gpj.getLevel(d), gpj.getIndex(d)
                # compute left and right boundary of the support of both
                # basis functions
                xlow = max([(iid - 1) * 2 ** -lid, (ijd - 1) * 2 ** -ljd])
                xhigh = min([(iid + 1) * 2 ** -lid, (ijd + 1) * 2 ** -ljd])
                # same level, different index => disjoint interiors
                if lid == ljd and iid != ijd:
                    s[d] = 0.
                # the support does not overlap
                elif lid != ljd and xlow >= xhigh:
                    s[d] = 0.
                else:
                    # ----------------------------------------------------
                    # use scipy for integration
                    def f(x):
                        return basis.eval(lid, iid, x) * \
                            basis.eval(ljd, ijd, x) * \
                            U[d].pdf(x)
                    s[d], _ = quad(f, xlow, xhigh, epsabs=1e-8)
                    # ----------------------------------------------------
            A.set(i, j, float(np.prod(s)))
            # store the diagonal entry (row point equals column point)
            if gs.getSequenceNumber(gpi) == j:
                b[i] = A.get(i, j)
    return A, b
def computePiecewiseConstantBilinearForm(grid, U):
    """Approximate the bilinear form by scaling the exact L2 dot-product
    entries with the pdf evaluated at the center of the two points'
    common support.

    @param grid: Grid
    @param U: distribution providing a pdf method
    @return: DataMatrix of size gs.size() x gs.size()
    """
    # create bilinear form of the grid
    gs = grid.getStorage()
    A = DataMatrix(gs.size(), gs.size())
    createOperationLTwoDotExplicit(A, grid)
    # multiply the entries with the pdf at the center of the support
    p = DataVector(gs.getDimension())
    q = DataVector(gs.getDimension())
    for i in range(gs.size()):
        for j in range(gs.size()):
            # BUG FIX: reload the coordinates of point i for every j.
            # Previously p was fetched once per row and then destructively
            # turned into the midpoint (p.add(q); p.mult(0.5)) without
            # being restored, so every inner iteration after the first
            # used corrupted coordinates.
            gs.getCoordinates(gs.getPoint(i), p)
            gs.getCoordinates(gs.getPoint(j), q)
            # compute center of the support
            p.add(q)
            p.mult(0.5)
            # multiply the entries in A with the pdf at p
            y = float(A.get(i, j) * U.pdf(p))
            A.set(i, j, y)
            A.set(j, i, y)
    return A
def computePiecewiseConstantBilinearForm(grid, U):
    """Approximate the bilinear form by scaling the exact L2 dot-product
    entries with the pdf evaluated at the center of the two points'
    common support (Python 2 variant).

    @param grid: Grid
    @param U: distribution providing a pdf method
    @return: DataMatrix of size gs.size() x gs.size()
    """
    # create bilinear form of the grid
    gs = grid.getStorage()
    A = DataMatrix(gs.size(), gs.size())
    createOperationLTwoDotExplicit(A, grid)
    # multiply the entries with the pdf at the center of the support
    p = DataVector(gs.dim())
    q = DataVector(gs.dim())
    for i in xrange(gs.size()):
        gs.get(i).getCoords(p)
        for j in xrange(gs.size()):
            gs.get(j).getCoords(q)
            # compute center of the support
            # NOTE(review): p is mutated into the midpoint here and never
            # restored, so every inner iteration after the first no longer
            # starts from the coordinates of point i — looks like a bug;
            # confirm before relying on these values
            p.add(q)
            p.mult(0.5)
            # multiply the entries in A with the pdf at p
            y = float(A.get(i, j) * U.pdf(p))
            A.set(i, j, y)
            A.set(j, i, y)
    return A
if not fd: fd = tools.gzOpen('tests/' + filename, 'r') dat = fd.read().strip() fd.close() dat = dat.split('\n') dat = map(lambda l: l.strip().split(None), dat) # right number of entries? self.assertEqual(storage.size(), len(dat)) self.assertEqual(storage.size(), len(dat[0])) m_ref = DataMatrix(len(dat), len(dat[0])) for i in xrange(len(dat)): for j in xrange(len(dat[0])): m_ref.set(i, j, float(dat[i][j])) return m_ref def readDataVector(filename): from pysgpp import DataVector try: fin = tools.gzOpen(filename, 'r') except IOError, e: fin = None if not fin: fin = tools.gzOpen('tests/' + filename, 'r') data = []
def computeBF(grid, U, admissibleSet):
    """
    Compute bilinear form
    (A)_ij = \int phi_i phi_j dU(x)
    on measure U, which is in this case supposed to be a lebesgue measure.

    The 1d product phi_i * phi_j is interpolated on an auxiliary
    polynomial boundary grid, hierarchized, multiplied with the pdf via
    discretize() and integrated by sparse grid quadrature.

    @param grid: Grid, sparse grid
    @param U: list of distributions, Lebeasgue measure
    @param admissibleSet: AdmissibleSet
    @return: tuple (A, b) — A is |admissibleSet| x |grid| DataMatrix,
             b holds the diagonal entries
    """
    gs = grid.getStorage()
    basis = getBasis(grid)
    # interpolate phi_i phi_j on sparse grid with piecewise polynomial SG
    # the product of two piecewise linear functions is a piecewise
    # polynomial one of degree 2.
    ngrid = Grid.createPolyBoundaryGrid(1, 2)
    ngrid.createGridGenerator().regular(2)
    ngs = ngrid.getStorage()
    nodalValues = DataVector(ngs.size())

    A = DataMatrix(admissibleSet.getSize(), gs.size())
    b = DataVector(admissibleSet.getSize())
    s = np.ndarray(gs.dim(), dtype='float')

    # NOTE: a large commented-out precomputation of pairwise basis
    # evaluations (and some plotting snippets below) were removed here;
    # they were dead code.

    # run over all rows
    for i, gpi in enumerate(admissibleSet.values()):
        # run over all columns
        for j in xrange(gs.size()):
            gpj = gs.get(j)
            for d in xrange(gs.dim()):
                # get level index
                lid, iid = gpi.getLevel(d), gpi.getIndex(d)
                ljd, ijd = gpj.getLevel(d), gpj.getIndex(d)
                # compute left and right boundary of the support of both
                # basis functions
                lb = max([(iid - 1) * 2 ** -lid, (ijd - 1) * 2 ** -ljd])
                ub = min([(iid + 1) * 2 ** -lid, (ijd + 1) * 2 ** -ljd])
                # same level, different index => disjoint interiors
                if lid == ljd and iid != ijd:
                    s[d] = 0.
                # the support does not overlap
                elif lid != ljd and lb >= ub:
                    s[d] = 0.
                else:
                    # ----------------------------------------------------
                    # do the 1d interpolation ...
                    # define transformation function mapping the unit
                    # interval onto the common support [lb, ub]
                    T = LinearTransformation(lb, ub)
                    for k in xrange(ngs.size()):
                        x = ngs.get(k).getCoord(0)
                        x = T.unitToProbabilistic(x)
                        nodalValues[k] = basis.eval(lid, iid, x) * \
                            basis.eval(ljd, ijd, x)
                    # ... by hierarchization
                    v = hierarchize(ngrid, nodalValues)

                    # discretize the following function (interpolant
                    # value times the pdf in dimension d)
                    def f(x, y):
                        xp = T.unitToProbabilistic(x)
                        return float(y * U[d].pdf(xp))

                    # sparse grid quadrature; rescale by the support width
                    g, w, _ = discretize(ngrid, v, f, refnums=0, level=5,
                                         useDiscreteL2Error=False)
                    s[d] = doQuadrature(g, w) * (ub - lb)
                    # ----------------------------------------------------
            A.set(i, j, float(np.prod(s)))
            # store the diagonal entry (row point equals column point)
            if gs.seq(gpi) == j:
                b[i] = A.get(i, j)
    return A, b
def computeBilinearForm(grid, U):
    """
    Compute bilinear form
    (A)_ij = \int phi_i phi_j dU(x)
    on measure U, which is in this case supposed to be a lebesgue measure.

    The 1d product phi_i * phi_j is interpolated on an auxiliary
    polynomial boundary grid, hierarchized, multiplied with the pdf via
    discretize() and integrated by sparse grid quadrature.

    @param grid: Grid, sparse grid
    @param U: list of distributions, Lebeasgue measure
    @return: DataMatrix of size gs.size() x gs.size()
    """
    gs = grid.getStorage()
    basis = getBasis(grid)
    # interpolate phi_i phi_j on sparse grid with piecewise polynomial SG
    # the product of two piecewise linear functions is a piecewise
    # polynomial one of degree 2.
    ngrid = Grid.createPolyBoundaryGrid(1, 2)
    # ngrid = Grid.createLinearBoundaryGrid(1)
    ngrid.getGenerator().regular(gs.getMaxLevel() + 1)
    ngs = ngrid.getStorage()
    nodalValues = DataVector(ngs.size())

    # level/index in the 2**l / i float representation used for support
    # computations below
    level = DataMatrix(gs.size(), gs.getDimension())
    index = DataMatrix(gs.size(), gs.getDimension())
    gs.getLevelIndexArraysForEval(level, index)

    A = DataMatrix(gs.size(), gs.size())
    s = np.ndarray(gs.getDimension(), dtype='float')

    # run over all rows
    for i in range(gs.size()):
        gpi = gs.getPoint(i)
        # run over all columns (upper triangle; mirrored below)
        for j in range(i, gs.size()):
            gpj = gs.getPoint(j)
            # run over all dimensions
            for d in range(gs.getDimension()):
                # get level index
                lid, iid = level.get(i, d), index.get(i, d)
                ljd, ijd = level.get(j, d), index.get(j, d)
                # compute left and right boundary of the support of both
                # basis functions
                lb = max([((iid - 1) / lid), ((ijd - 1) / ljd)])
                ub = min([((iid + 1) / lid), ((ijd + 1) / ljd)])
                # same level, different index => disjoint interiors
                if lid == ljd and iid != ijd:
                    s[d] = 0.
                # the support does not overlap
                elif lid != ljd and lb >= ub:
                    s[d] = 0.
                else:
                    # ----------------------------------------------------
                    # do the 1d interpolation ...
                    # switch back to integer level/index for basis.eval
                    lid, iid = gpi.getLevel(d), int(iid)
                    ljd, ijd = gpj.getLevel(d), int(ijd)
                    for k in range(ngs.size()):
                        x = ngs.getCoordinate(ngs.getPoint(k), 0)
                        nodalValues[k] = max(0, basis.eval(lid, iid, x)) * \
                            max(0, basis.eval(ljd, ijd, x))
                    # ... by hierarchization
                    v = hierarchize(ngrid, nodalValues)

                    # weight the interpolant with the pdf in dimension d
                    def f(x, y):
                        return float(y * U[d].pdf(x[0]))

                    g, w, _ = discretize(ngrid, v, f, refnums=0)
                    # compute the integral of it
                    s[d] = doQuadrature(g, w)
                    # ----------------------------------------------------
            # store result in matrix (symmetric)
            A.set(i, j, float(np.prod(s)))
            A.set(j, i, A.get(i, j))
    return A
def computeBF(grid, U, admissibleSet):
    """
    Compute bilinear form
    (A)_ij = \int phi_i phi_j dU(x)
    on measure U, which is in this case supposed to be a lebesgue measure.

    The 1d product phi_i * phi_j is interpolated on an auxiliary
    polynomial boundary grid, hierarchized, multiplied with the pdf via
    discretize() and integrated by sparse grid quadrature.

    @param grid: Grid, sparse grid
    @param U: list of distributions, Lebeasgue measure
    @param admissibleSet: AdmissibleSet
    @return: tuple (A, b) — A is |admissibleSet| x |grid| DataMatrix,
             b holds the diagonal entries
    """
    gs = grid.getStorage()
    basis = getBasis(grid)
    # interpolate phi_i phi_j on sparse grid with piecewise polynomial SG
    # the product of two piecewise linear functions is a piecewise
    # polynomial one of degree 2.
    ngrid = Grid.createPolyBoundaryGrid(1, 2)
    ngrid.getGenerator().regular(2)
    ngs = ngrid.getStorage()
    nodalValues = DataVector(ngs.size())

    A = DataMatrix(admissibleSet.getSize(), gs.size())
    b = DataVector(admissibleSet.getSize())
    s = np.ndarray(gs.getDimension(), dtype='float')

    # NOTE: a large commented-out precomputation of pairwise basis
    # evaluations (and some plotting snippets below) were removed here;
    # they were dead code.

    # run over all rows
    for i, gpi in enumerate(admissibleSet.values()):
        # run over all columns
        for j in range(gs.size()):
            gpj = gs.getPoint(j)
            for d in range(gs.getDimension()):
                # get level index
                lid, iid = gpi.getLevel(d), gpi.getIndex(d)
                ljd, ijd = gpj.getLevel(d), gpj.getIndex(d)
                # compute left and right boundary of the support of both
                # basis functions
                lb = max([(iid - 1) * 2 ** -lid, (ijd - 1) * 2 ** -ljd])
                ub = min([(iid + 1) * 2 ** -lid, (ijd + 1) * 2 ** -ljd])
                # same level, different index => disjoint interiors
                if lid == ljd and iid != ijd:
                    s[d] = 0.
                # the support does not overlap
                elif lid != ljd and lb >= ub:
                    s[d] = 0.
                else:
                    # ----------------------------------------------------
                    # do the 1d interpolation ...
                    # define transformation function mapping the unit
                    # interval onto the common support [lb, ub]
                    T = LinearTransformation(lb, ub)
                    for k in range(ngs.size()):
                        x = ngs.getCoordinate(ngs.getPoint(k), 0)
                        x = T.unitToProbabilistic(x)
                        nodalValues[k] = basis.eval(lid, iid, x) * \
                            basis.eval(ljd, ijd, x)
                    # ... by hierarchization
                    v = hierarchize(ngrid, nodalValues)

                    # discretize the following function (interpolant
                    # value times the pdf in dimension d)
                    def f(x, y):
                        xp = T.unitToProbabilistic(x)
                        return float(y * U[d].pdf(xp))

                    # sparse grid quadrature; rescale by the support width
                    g, w, _ = discretize(ngrid, v, f, refnums=0, level=5,
                                         useDiscreteL2Error=False)
                    s[d] = doQuadrature(g, w) * (ub - lb)
                    # ----------------------------------------------------
            A.set(i, j, float(np.prod(s)))
            # store the diagonal entry (row point equals column point)
            if gs.getSequenceNumber(gpi) == j:
                b[i] = A.get(i, j)
    return A, b
if not fd: fd = tools.gzOpen('tests/' + filename, 'r') dat = fd.read().strip() fd.close() dat = dat.split('\n') dat = map(lambda l: l.strip().split(None), dat) # right number of entries? self.assertEqual(storage.size(), len(dat)) self.assertEqual(storage.size(), len(dat[0])) m_ref = DataMatrix(len(dat), len(dat[0])) for i in xrange(len(dat)): for j in xrange(len(dat[0])): m_ref.set(i, j, float(dat[i][j])) return m_ref def readDataVector(filename): from pysgpp import DataVector try: fin = tools.gzOpen(filename, 'r') except IOError, e: fin = None if not fin: fin = tools.gzOpen('tests/' + filename, 'r')
def computeBilinearForm(grid, U):
    """
    Compute bilinear form
    (A)_ij = \int phi_i phi_j dU(x)
    on measure U, which is in this case supposed to be a lebesgue measure.

    The 1d product phi_i * phi_j is interpolated on an auxiliary
    polynomial boundary grid, hierarchized, multiplied with the pdf via
    discretize() and integrated by sparse grid quadrature.

    @param grid: Grid, sparse grid
    @param U: list of distributions, Lebeasgue measure
    @return: DataMatrix of size gs.size() x gs.size()
    """
    gs = grid.getStorage()
    basis = getBasis(grid)
    # interpolate phi_i phi_j on sparse grid with piecewise polynomial SG
    # the product of two piecewise linear functions is a piecewise
    # polynomial one of degree 2.
    ngrid = Grid.createPolyBoundaryGrid(1, 2)
    # ngrid = Grid.createLinearBoundaryGrid(1)
    ngrid.createGridGenerator().regular(gs.getMaxLevel() + 1)
    ngs = ngrid.getStorage()
    nodalValues = DataVector(ngs.size())

    # level/index in the 2**l / i float representation used for support
    # computations below
    level = DataMatrix(gs.size(), gs.dim())
    index = DataMatrix(gs.size(), gs.dim())
    gs.getLevelIndexArraysForEval(level, index)

    A = DataMatrix(gs.size(), gs.size())
    s = np.ndarray(gs.dim(), dtype='float')

    # run over all rows
    for i in xrange(gs.size()):
        gpi = gs.get(i)
        # run over all columns (upper triangle; mirrored below)
        for j in xrange(i, gs.size()):
            gpj = gs.get(j)
            # run over all dimensions
            for d in xrange(gs.dim()):
                # get level index
                lid, iid = level.get(i, d), index.get(i, d)
                ljd, ijd = level.get(j, d), index.get(j, d)
                # compute left and right boundary of the support of both
                # basis functions
                lb = max([(iid - 1) / lid, (ijd - 1) / ljd])
                ub = min([(iid + 1) / lid, (ijd + 1) / ljd])
                # same level, different index => disjoint interiors
                if lid == ljd and iid != ijd:
                    s[d] = 0.
                # the support does not overlap
                elif lid != ljd and lb >= ub:
                    s[d] = 0.
                else:
                    # ----------------------------------------------------
                    # do the 1d interpolation ...
                    # switch back to integer level/index for basis.eval
                    lid, iid = gpi.getLevel(d), int(iid)
                    ljd, ijd = gpj.getLevel(d), int(ijd)
                    for k in xrange(ngs.size()):
                        x = ngs.get(k).getCoord(0)
                        nodalValues[k] = max(0, basis.eval(lid, iid, x)) * \
                            max(0, basis.eval(ljd, ijd, x))
                    # ... by hierarchization
                    v = hierarchize(ngrid, nodalValues)

                    # weight the interpolant with the pdf in dimension d
                    def f(x, y):
                        return float(y * U[d].pdf(x[0]))

                    g, w, _ = discretize(ngrid, v, f, refnums=0)
                    # compute the integral of it
                    s[d] = doQuadrature(g, w)
                    # ----------------------------------------------------
            # store result in matrix (symmetric)
            A.set(i, j, float(np.prod(s)))
            A.set(j, i, A.get(i, j))
    return A