def testOperationTest_test(self):
    """Check OperationTest on a 1D linear boundary grid of level 1.

    A positive surplus on the inner point must classify the test point
    (0.25, class 1.0) correctly (c > 0); a negative surplus must not (c == 0).
    Uses ``assertTrue`` instead of the deprecated ``failUnless`` alias.
    """
    from pysgpp import Grid, DataVector, DataMatrix
    factory = Grid.createLinearBoundaryGrid(1)
    gen = factory.createGridGenerator()
    gen.regular(1)
    # level-1 boundary grid in 1D has 3 points: 2 boundary + 1 inner
    alpha = DataVector(factory.getStorage().size())
    data = DataMatrix(1, 1)
    data.setAll(0.25)
    classes = DataVector(1)
    classes.setAll(1.0)
    testOP = factory.createOperationTest()
    # positive surplus on the inner basis function -> correct classification
    alpha[0] = 0.0
    alpha[1] = 0.0
    alpha[2] = 1.0
    c = testOP.test(alpha, data, classes)
    self.assertTrue(c > 0.0)
    # negative surplus -> misclassification, zero correct count
    alpha[0] = 0.0
    alpha[1] = 0.0
    alpha[2] = -1.0
    c = testOP.test(alpha, data, classes)
    self.assertTrue(c == 0.0)
def cdf(self, x):
    """Evaluate the cumulative distribution function of ``self.dist``.

    Accepts a scalar, a list, a matrix-like object, a DataVector or a
    DataMatrix; returns a scalar for vector/scalar input and a numpy
    array for matrix input.
    """
    # normalize the input to a DataVector / DataMatrix
    if isList(x):
        x = DataVector(x)
    elif isNumerical(x):
        x = DataVector([x])
    elif isMatrix(x):
        x = DataMatrix(x)

    # wrap everything into a (rows x cols) input matrix plus a zeroed
    # output matrix of the same shape
    if isinstance(x, DataMatrix):
        inp = x
        out = DataMatrix(inp.getNrows(), inp.getNcols())
        out.setAll(0.0)
    elif isinstance(x, DataVector):
        inp = DataMatrix(1, len(x))
        inp.setRow(0, x)
        out = DataMatrix(1, len(x))
        out.setAll(0)

    # delegate the actual evaluation to the wrapped distribution
    self.dist.cdf(inp, out)

    # unwrap: scalar for vector input, numpy array for matrix input
    if isNumerical(x) or isinstance(x, DataVector):
        return out.get(0, 0)
    elif isinstance(x, DataMatrix):
        return out.array()
def computeBilinearForm(self, grid):
    """
    Compute bilinear form for the current grid
    @param grid: Grid
    @return: DataMatrix

    Builds the explicit L2 dot-product matrix of the grid once and then
    scales each entry by the pdf of self._U evaluated at the grid point
    coordinates, caching the scaled entries in self._map.
    """
    # create bilinear form of the grid
    # NOTE: the original code built gs/A and ran
    # createOperationLTwoDotExplicit twice; the first result was discarded.
    gs = grid.getStorage()
    A = DataMatrix(gs.size(), gs.size())
    A.setAll(0.)
    createOperationLTwoDotExplicit(A, grid)
    # multiply the entries with the pdf at the center of the support
    p = DataVector(gs.dim())
    q = DataVector(gs.dim())
    for i in xrange(gs.size()):
        gpi = gs.get(i)
        gpi.getCoords(p)
        for j in xrange(gs.size()):
            gpj = gs.get(j)
            gpj.getCoords(q)
            # scale by pdf at gpi's coordinates and mirror to keep A symmetric
            y = float(A.get(i, j) * self._U.pdf(p))
            A.set(i, j, y)
            A.set(j, i, y)
            self._map[self.getKey(gpi, gpj)] = A.get(i, j)
    return A
def testOperationTest_test(self):
    """Exercise OperationTest on a 1D level-1 linear boundary grid."""
    from pysgpp import Grid, DataVector, DataMatrix
    grid = Grid.createLinearBoundaryGrid(1)
    grid.createGridGenerator().regular(1)
    coeffs = DataVector(grid.getStorage().size())
    points = DataMatrix(1, 1)
    points.setAll(0.25)
    labels = DataVector(1)
    labels.setAll(1.0)
    op = grid.createOperationTest()
    # positive inner surplus must yield a correct classification,
    # negative one must yield none
    coeffs[0] = 0.0
    coeffs[1] = 0.0
    coeffs[2] = 1.0
    correct = op.test(coeffs, points, labels)
    self.failUnless(correct > 0.0)
    coeffs[0] = 0.0
    coeffs[1] = 0.0
    coeffs[2] = -1.0
    correct = op.test(coeffs, points, labels)
    self.failUnless(correct == 0.0)
def ppf(self, x):
    """Percent point function (inverse cdf) via the KDE-based inverse
    Rosenblatt transformation of ``self.dist``.

    Accepts a scalar, list, matrix-like, DataVector or DataMatrix;
    returns a scalar for vector/scalar input, a numpy array otherwise.
    """
    # bring the input into DataVector / DataMatrix form
    if isList(x):
        x = DataVector(x)
    elif isNumerical(x):
        x = DataVector([x])
    elif isMatrix(x):
        x = DataMatrix(x)

    # build the transformation input and a zero-initialized result matrix
    if isinstance(x, DataMatrix):
        src = x
        dst = DataMatrix(src.getNrows(), src.getNcols())
        dst.setAll(0.0)
    elif isinstance(x, DataVector):
        src = DataMatrix(1, len(x))
        src.setRow(0, x)
        dst = DataMatrix(1, len(x))
        dst.setAll(0)

    # apply the inverse Rosenblatt transformation of the KDE distribution
    opInvRosen = createOperationInverseRosenblattTransformationKDE(self.dist)
    opInvRosen.doTransformation(src, dst)

    # unwrap the result matching the input kind
    if isNumerical(x) or isinstance(x, DataVector):
        return dst.get(0, 0)
    elif isinstance(x, DataMatrix):
        return dst.array()
def ppf(self, x):
    """Percent point function, delegated to ``self.dist.ppf``.

    Handles scalar, list, matrix-like, DataVector and DataMatrix inputs;
    returns a scalar for vector/scalar input, a numpy array for matrices.
    """
    # coerce the argument into the SG++ data types
    if isList(x):
        x = DataVector(x)
    elif isNumerical(x):
        x = DataVector([x])
    elif isMatrix(x):
        x = DataMatrix(x)

    # prepare input matrix and zeroed output of identical shape
    if isinstance(x, DataMatrix):
        arg = x
        res = DataMatrix(arg.getNrows(), arg.getNcols())
        res.setAll(0.0)
    elif isinstance(x, DataVector):
        arg = DataMatrix(1, len(x))
        arg.setRow(0, x)
        res = DataMatrix(1, len(x))
        res.setAll(0)

    # evaluate the wrapped distribution's ppf
    self.dist.ppf(arg, res)

    # return in the shape matching the caller's input
    if isNumerical(x) or isinstance(x, DataVector):
        return res.get(0, 0)
    elif isinstance(x, DataMatrix):
        return res.array()
def computeBilinearForm(self, grid):
    """
    Compute bilinear form for the current grid
    @param grid: Grid
    @return DataMatrix

    Computes the explicit L2 dot-product matrix and scales every entry
    by the pdf of self._U at the grid point coordinates; the scaled
    values are also cached in self._map keyed by the point pair.
    """
    storage = grid.getStorage()
    n = storage.getSize()
    # explicit L2 dot-product matrix of the basis functions
    mat = DataMatrix(n, n)
    mat.setAll(0.)
    createOperationLTwoDotExplicit(mat, grid)
    # multiply the entries with the pdf at the center of the support
    p = DataVector(storage.getDimension())
    q = DataVector(storage.getDimension())
    for i in range(n):
        gpi = storage.getPoint(i)
        storage.getCoordinates(gpi, p)
        for j in range(n):
            gpj = storage.getPoint(j)
            storage.getCoordinates(gpj, q)
            # scale by pdf at gpi and mirror to keep the matrix symmetric
            val = float(mat.get(i, j) * self._U.pdf(p))
            mat.set(i, j, val)
            mat.set(j, i, val)
            self._map[self.getKey([gpi, gpj])] = mat.get(i, j)
    return mat
def ppf(self, x):
    """Percent point function via the KDE inverse Rosenblatt
    transformation built from ``self.trainData``.

    Accepts a scalar, list, matrix-like, DataVector or DataMatrix;
    returns a scalar for vector/scalar input, a numpy array for matrices.
    """
    # convert the parameter to the right format
    if isList(x):
        x = DataVector(x)
    elif isNumerical(x):
        x = DataVector([x])
    elif isMatrix(x):
        # consistency fix: the sibling ppf/cdf implementations convert
        # matrix-like input as well; without this branch a raw matrix fell
        # through and A/B were never defined (NameError below)
        x = DataMatrix(x)

    if isinstance(x, DataMatrix):
        A = x
        B = DataMatrix(A.getNrows(), A.getNcols())
        B.setAll(0.0)
    elif isinstance(x, DataVector):
        A = DataMatrix(1, len(x))
        A.setRow(0, x)
        B = DataMatrix(1, len(x))
        B.setAll(0)

    # do the transformation; input dimensionality must match the training data
    assert A.getNcols() == B.getNcols() == self.trainData.getNcols()
    op = createOperationInverseRosenblattTransformationKDE(self.trainData)
    op.doTransformation(A, B)

    # transform the outcome
    if isNumerical(x) or isinstance(x, DataVector):
        return B.get(0, 0)
    elif isinstance(x, DataMatrix):
        return B.array()
def sampleGrids(self, filename):
    """Write the sparse grid functions of all time steps of interest to
    ARFF files.

    For each time step t three files are produced, named after the
    *filename* prefix: full-grid samples of the sparse grid function
    (``<filename>.t<t>.samples.arff``), the sparse grid point coordinates
    (``<filename>.t<t>.gridpoints.arff``) and the surplus vector
    (``<filename>.t<t>.alpha.arff``).

    @param filename: str, prefix for the output file names
    """
    ts = self.__learner.getTimeStepsOfInterest()
    names = self.__params.getNames()
    # column label for the function values appended below
    names.append('f_\\mathcal{I}(x)')
    for t in ts:
        grid, surplus = self.__knowledge.getSparseGridFunction(
            self._qoi, t)
        # init
        gs = grid.getStorage()
        dim = gs.dim()
        # -----------------------------------------
        # do full grid sampling of sparse grid function
        # -----------------------------------------
        # eval_fullGrid(4, dim): presumably a level-4 full grid — TODO confirm
        data = eval_fullGrid(4, dim)
        res = evalSGFunctionMulti(grid, surplus, data)
        # append the function values as an extra column: transpose so the
        # values can be added as a row, then transpose back
        data.transpose()
        data.appendRow()
        data.setRow(data.getNrows() - 1, res)
        data.transpose()
        # write results
        writeDataARFF({
            'filename': "%s.t%f.samples.arff" % (filename, t),
            'data': data,
            'names': names
        })
        # -----------------------------------------
        # write sparse grid points to file
        # -----------------------------------------
        # one row per grid point holding its coordinates
        data = DataMatrix(gs.size(), dim)
        data.setAll(0.0)
        for i in xrange(gs.size()):
            gp = gs.get(i)
            v = np.array([gp.getCoord(j) for j in xrange(dim)])
            data.setRow(i, DataVector(v))
        # write results
        writeDataARFF({
            'filename': "%s.t%f.gridpoints.arff" % (filename, t),
            'data': data,
            'names': names
        })
        # -----------------------------------------
        # write alpha
        # -----------------------------------------
        writeAlphaARFF("%s.t%f.alpha.arff" % (filename, t), surplus)
def sampleGrids(self, filename):
    """Dump the sparse grid function of every time step of interest to
    ARFF files: full-grid samples, grid point coordinates and surpluses,
    each in its own file derived from the *filename* prefix."""
    time_steps = self.__learner.getTimeStepsOfInterest()
    names = self.__params.getNames()
    names.append('f_\\mathcal{I}(x)')
    for t in time_steps:
        grid, surplus = self.__knowledge.getSparseGridFunction(self._qoi, t)
        storage = grid.getStorage()
        dim = storage.dim()
        # --- full grid sampling of the sparse grid function ---
        samples = eval_fullGrid(4, dim)
        values = evalSGFunctionMulti(grid, surplus, samples)
        # append the evaluated values as an additional column
        samples.transpose()
        samples.appendRow()
        samples.setRow(samples.getNrows() - 1, values)
        samples.transpose()
        writeDataARFF({'filename': "%s.t%f.samples.arff" % (filename, t),
                       'data': samples,
                       'names': names})
        # --- sparse grid point coordinates, one row per point ---
        points = DataMatrix(storage.size(), dim)
        points.setAll(0.0)
        for i in xrange(storage.size()):
            gp = storage.get(i)
            coords = np.array([gp.getCoord(j) for j in xrange(dim)])
            points.setRow(i, DataVector(coords))
        writeDataARFF({'filename': "%s.t%f.gridpoints.arff" % (filename, t),
                       'data': points,
                       'names': names})
        # --- surplus vector ---
        writeAlphaARFF("%s.t%f.alpha.arff" % (filename, t), surplus)
def ppf(self, x, shuffle=True):
    """Percent point function via the inverse Rosenblatt transformation
    on the sparse grid.

    @param x: evaluation point(s); normalized by _convertEvalPoint to a
              2D numpy array
    @param shuffle: bool, use the shuffled transformation (default) or a
                    fixed permutation seed 0
    @return: scalar-like for a single point, flattened/2D array otherwise
    """
    # convert the parameter to the right format
    x = self._convertEvalPoint(x)

    # do the transformation
    if self.dim == 1:
        op = createOperationInverseRosenblattTransformation1D(self.grid)
        x_unit = np.ndarray((x.shape[0], x.shape[1]))
        for i, xi in enumerate(x[:, 0]):
            x_unit[i, 0] = op.doTransformation1D(self.unnormalized_alpha_vec, xi)

        # transform the samples to the probabilistic space
        if self.trans is not None:
            x_prob = self.trans.unitToProbabilisticMatrix(x_unit)
        else:
            # bug fix: previously returned the untransformed input `x`,
            # discarding the transformation computed above
            x_prob = x_unit

        # extract the outcome
        if x_prob.shape[0] == 1 and x_prob.shape[1] == 1:
            return x_prob[:, 0]
        else:
            return x_prob.flatten()
    else:
        A_vec = DataMatrix(x)
        B_vec = DataMatrix(x.shape[0], x.shape[1])
        B_vec.setAll(0.0)

        # do the transformation
        op = createOperationInverseRosenblattTransformation(self.grid)
        if shuffle:
            op.doTransformation(self.unnormalized_alpha_vec, A_vec, B_vec)
        else:
            # trailing 0: fixed permutation seed — no shuffling
            op.doTransformation(self.unnormalized_alpha_vec, A_vec, B_vec, 0)

        # transform the samples to the probabilistic space
        B = B_vec.array()
        if self.trans is not None:
            B_prob = self.trans.unitToProbabilisticMatrix(B)
        else:
            B_prob = B

        # extract the outcome
        if x.shape == (1, 1):
            # bug fix: B_prob is a numpy array — `.get(0, 0)` raised
            # AttributeError; index it instead
            return B_prob[0, 0]
        else:
            return B_prob
def ppf(self, x, shuffle=False):
    """Percent point function via the KDE-based inverse Rosenblatt
    transformation of ``self.dist``.

    @param x: evaluation point(s)
    @param shuffle: bool, use the shuffled variant of the transformation
    @return: scalar for a single point, numpy array otherwise
    """
    points = DataMatrix(self._convertEvalPoint(x))
    out = DataMatrix(points.getNrows(), points.getNcols())
    out.setAll(0.0)

    # run the (optionally shuffled) inverse Rosenblatt transformation
    opRosen = createOperationInverseRosenblattTransformationKDE(self.dist)
    if shuffle:
        opRosen.doShuffledTransformation(points, out)
    else:
        opRosen.doTransformation(points, out)

    # collapse a 1x1 result to a plain scalar
    values = out.array()
    if values.shape[0] == 1 and values.shape[1] == 1:
        return values[0, 0]
    return values
def computeBilinearForm(self, grid):
    """
    Compute bilinear form for the current grid
    @param grid: Grid
    @return: DataMatrix

    The explicit L2 dot-product matrix is computed once and every entry
    is cached in self._map under the key of its grid point pair.
    """
    storage = grid.getStorage()
    n = storage.size()
    A = DataMatrix(n, n)
    A.setAll(0.)
    createOperationLTwoDotExplicit(A, grid)
    # mirror all entries into the hash map for later lookups
    for i in xrange(n):
        gpi = storage.get(i)
        for j in xrange(n):
            gpj = storage.get(j)
            self._map[self.getKey(gpi, gpj)] = A.get(i, j)
    return A
def ppf(self, x):
    """Percent point function via the inverse Rosenblatt transformation
    on ``self.grid`` with surpluses ``self.alpha``.

    Scalars and lists are converted to DataVector first; returns a
    scalar for vector/scalar input, a numpy array for matrix input.
    """
    # normalize scalar / list input
    if isList(x):
        x = DataVector(x)
    elif isNumerical(x):
        x = DataVector([x])

    if self.grid.getStorage().dim() == 1:
        # univariate case: apply the 1D transformation entry by entry
        op = createOperationInverseRosenblattTransformation1D(self.grid)
        res = np.ndarray(len(x))
        for k, xk in enumerate(x.array()):
            res[k] = op.doTransformation1D(self.alpha, xk)
        return res[0] if len(res) == 1 else res
    else:
        # multivariate case: build input / zeroed output matrices
        if isinstance(x, DataMatrix):
            inp = x
            out = DataMatrix(inp.getNrows(), inp.getNcols())
            out.setAll(0.0)
        elif isinstance(x, DataVector):
            inp = DataMatrix(1, len(x))
            inp.setRow(0, x)
            out = DataMatrix(1, len(x))
            out.setAll(0)

        op = createOperationInverseRosenblattTransformation(self.grid)
        op.doTransformation(self.alpha, inp, out)

        # unwrap according to the input kind
        if isNumerical(x) or isinstance(x, DataVector):
            return out.get(0, 0)
        elif isinstance(x, DataMatrix):
            return out.array()
def ppf(self, x):
    """Percent point function (inverse cdf) of the sparse grid density.

    Uses the 1D inverse Rosenblatt transformation when the grid is
    univariate and the multivariate operation otherwise.

    @param x: scalar, list, DataVector or DataMatrix of evaluation points
    @return: scalar for scalar/vector input; numpy array for matrix input
    """
    # convert the parameter to the right format
    if isList(x):
        x = DataVector(x)
    elif isNumerical(x):
        x = DataVector([x])

    # do the transformation
    if self.grid.getStorage().dim() == 1:
        # univariate: transform each entry separately
        op = createOperationInverseRosenblattTransformation1D(self.grid)
        ans = np.ndarray(len(x))
        for i, xi in enumerate(x.array()):
            ans[i] = op.doTransformation1D(self.alpha, xi)
        if len(ans) == 1:
            return ans[0]
        else:
            return ans
    else:
        # multivariate: wrap the input into a matrix and prepare a
        # zero-initialized output of the same shape
        if isinstance(x, DataMatrix):
            A = x
            B = DataMatrix(A.getNrows(), A.getNcols())
            B.setAll(0.0)
        elif isinstance(x, DataVector):
            A = DataMatrix(1, len(x))
            A.setRow(0, x)
            B = DataMatrix(1, len(x))
            B.setAll(0)

        # do the transformation
        op = createOperationInverseRosenblattTransformation(self.grid)
        op.doTransformation(self.alpha, A, B)

        # extract the outcome
        if isNumerical(x) or isinstance(x, DataVector):
            return B.get(0, 0)
        elif isinstance(x, DataMatrix):
            return B.array()
def cdf(self, x, shuffle=True):
    """Cumulative distribution function via the Rosenblatt
    transformation on the sparse grid.

    @param x: evaluation point(s); normalized by _convertEvalPoint
    @param shuffle: bool, shuffled transformation (default) or fixed
                    permutation seed 0
    @return: scalar for a single point, numpy array otherwise
    """
    x = self._convertEvalPoint(x)

    # map the samples to the unit hypercube if a transformation is set
    if self.trans is not None:
        x_unit = self.trans.probabilisticToUnitMatrix(x)
    else:
        x_unit = x

    if self.dim == 1:
        # univariate: transform entry by entry
        op = createOperationRosenblattTransformation1D(self.grid)
        res = np.ndarray(x.shape[0])
        for k, xk in enumerate(x_unit[:, 0]):
            res[k] = op.doTransformation1D(self.unnormalized_alpha_vec, xk)
        return res[0] if len(res) == 1 else res
    else:
        inp = DataMatrix(x_unit)
        out = DataMatrix(x_unit.shape[0], x_unit.shape[1])
        out.setAll(0.0)

        op = createOperationRosenblattTransformation(self.grid)
        # NOTE(review): this branch uses self.alpha_vec while the 1D
        # branch uses self.unnormalized_alpha_vec — confirm the asymmetry
        # is intended
        if shuffle:
            op.doTransformation(self.alpha_vec, inp, out)
        else:
            op.doTransformation(self.alpha_vec, inp, out, 0)

        # collapse a 1x1 result to a scalar
        if x_unit.shape == (1, 1):
            return out.get(0, 0)
        else:
            return out.array()
def createNullVector(self, size, dim):
    """Return a size x dim DataMatrix with every entry set to zero."""
    null = DataMatrix(size, dim)
    null.setAll(0)
    return null
def createNullVector(self, size, dim):
    """Allocate and zero-initialize a DataMatrix of shape (size, dim)."""
    zeros = DataMatrix(size, dim)
    zeros.setAll(0)
    return zeros
def var(self, grid, alpha, U, T, mean):
    r"""
    Extraction of the expectation the given sparse grid function
    interpolating the product of function value and pdf.

    \int\limits_{[0, 1]^d} (f(x) - E(f))^2 * pdf(x) dx

    @param grid: Grid of the sparse grid function
    @param alpha: surplus vector of the sparse grid function
    @param U: distribution(s) of the input parameters
    @param T: transformation set; provides per-dimension transformations
    @param mean: precomputed expectation E(f) of the function
    @return: tuple (moment, err) — the variance estimate and the
             accumulated quadrature error
    """
    # extract correct pdf for moment estimation
    vol, W = self._extractPDFforMomentEstimation(U, T)
    D = T.getTransformations()

    # copy the grid, and add a trapezoidal boundary
    # ngrid = GridDescriptor().fromGrid(grid)\
    #                         .withBorder(BorderTypes.TRAPEZOIDBOUNDARY)\
    #                         .createGrid()
    # compute nodalValues
    # ngs = ngrid.getStorage()
    # nodalValues = DataVector(ngs.size())
    # p = DataVector(ngs.dim())
    # for i in xrange(ngs.size()):
    #     ngs.get(i).getCoords(p)
    #     nodalValues[i] = evalSGFunction(grid, alpha, p) - mean
    #
    # # hierarchize the new function
    # nalpha = hierarchize(ngrid, nodalValues)

    # the boundary-grid variant above is disabled; work on the input grid
    ngs = grid.getStorage()
    ngrid, nalpha = grid, alpha

    # compute the integral of the product times the pdf
    # acc accumulates the component-wise product of the per-dimension
    # bilinear/trilinear forms, starting from all ones
    acc = DataMatrix(ngs.size(), ngs.size())
    acc.setAll(1.)
    err = 0
    for i, dims in enumerate(W.getTupleIndices()):
        dist = W[i]
        trans = D[i]

        # get the objects needed for integrating
        # the current dimensions
        gpsi, basisi = project(ngrid, dims)

        if isinstance(dist, SGDEdist):
            # project distribution on desired dimensions
            # get the objects needed for integrating
            # the current dimensions
            gpsk, basisk = project(dist.grid, range(len(dims)))
            # compute the bilinear form
            tf = TrilinearGaussQuadratureStrategy([dist], trans)
            A, erri = tf.computeTrilinearFormByList(
                gpsk, basisk, dist.alpha, gpsi, basisi, gpsi, basisi)
        else:
            # we compute the bilinear form of the grids
            # compute the bilinear form
            if len(dims) == 1:
                dist = [dist]
                trans = [trans]
            bf = BilinearGaussQuadratureStrategy(dist, trans)
            A, erri = bf.computeBilinearFormByList(gpsi, basisi, gpsi, basisi)

        # accumulate the results
        acc.componentwise_mult(A)

        # accumulate the error (weighted by the mean magnitude of acc)
        err += acc.sum() / (acc.getNrows() * acc.getNcols()) * erri

    # compute the variance: vol * alpha^T * acc * alpha - mean^2
    tmp = DataVector(acc.getNrows())
    self.mult(acc, nalpha, tmp)
    moment = vol * nalpha.dotProduct(tmp)

    moment = moment - mean**2

    return moment, err
def var(self, grid, alpha, U, T, mean):
    r"""
    Extraction of the expectation the given sparse grid function
    interpolating the product of function value and pdf.

    \int\limits_{[0, 1]^d} (f(x) - E(f))^2 * pdf(x) dx

    @param grid: Grid of the sparse grid function
    @param alpha: surplus vector of the sparse grid function
    @param U: distribution(s) of the input parameters
    @param T: transformation set; provides per-dimension transformations
    @param mean: precomputed expectation E(f) of the function
    @return: tuple (moment, err) — variance estimate and accumulated
             quadrature error
    """
    # extract correct pdf for moment estimation
    vol, W = self._extractPDFforMomentEstimation(U, T)
    D = T.getTransformations()

    # copy the grid, and add a trapezoidal boundary
    # ngrid = GridDescriptor().fromGrid(grid)\
    #                         .withBorder(BorderTypes.TRAPEZOIDBOUNDARY)\
    #                         .createGrid()
    # compute nodalValues
    # ngs = ngrid.getStorage()
    # nodalValues = DataVector(ngs.size())
    # p = DataVector(ngs.dim())
    # for i in xrange(ngs.size()):
    #     ngs.get(i).getCoords(p)
    #     nodalValues[i] = evalSGFunction(grid, alpha, p) - mean
    #
    # # hierarchize the new function
    # nalpha = hierarchize(ngrid, nodalValues)

    # boundary-grid variant above is disabled; operate on the input grid
    ngs = grid.getStorage()
    ngrid, nalpha = grid, alpha

    # compute the integral of the product times the pdf;
    # acc holds the running component-wise product of the per-dimension
    # quadrature matrices, initialized to ones
    acc = DataMatrix(ngs.size(), ngs.size())
    acc.setAll(1.)
    err = 0
    for i, dims in enumerate(W.getTupleIndices()):
        dist = W[i]
        trans = D[i]

        # get the objects needed for integrating
        # the current dimensions
        gpsi, basisi = project(ngrid, dims)

        if isinstance(dist, SGDEdist):
            # project distribution on desired dimensions
            # get the objects needed for integrating
            # the current dimensions
            gpsk, basisk = project(dist.grid, range(len(dims)))
            # compute the bilinear form
            tf = TrilinearGaussQuadratureStrategy([dist], trans)
            A, erri = tf.computeTrilinearFormByList(gpsk, basisk, dist.alpha,
                                                    gpsi, basisi,
                                                    gpsi, basisi)
        else:
            # we compute the bilinear form of the grids
            # compute the bilinear form
            if len(dims) == 1:
                dist = [dist]
                trans = [trans]
            bf = BilinearGaussQuadratureStrategy(dist, trans)
            A, erri = bf.computeBilinearFormByList(gpsi, basisi, gpsi, basisi)

        # accumulate the results
        acc.componentwise_mult(A)

        # accumulate the error (scaled by the mean magnitude of acc)
        err += acc.sum() / (acc.getNrows() * acc.getNcols()) * erri

    # compute the variance: vol * alpha^T * acc * alpha - mean^2
    tmp = DataVector(acc.getNrows())
    self.mult(acc, nalpha, tmp)
    moment = vol * nalpha.dotProduct(tmp)

    moment = moment - mean ** 2

    return moment, err