def testOperationB(self):
     """Test OperationB (mult / multTranspose) on a 1D level-2 linear
     boundary grid with a single data point at x = 0.25."""
     from pysgpp import Grid, DataVector, DataMatrix
     factory = Grid.createLinearBoundaryGrid(1)
     gen = factory.createGridGenerator()
     gen.regular(2)

     alpha = DataVector(factory.getStorage().size())
     p = DataMatrix(1,1)  # one data point in one dimension
     beta = DataVector(1)

     alpha.setAll(0.0)
     p.set(0,0,0.25)
     beta[0] = 1.0

     # alpha = B * beta: with beta = 1, alpha holds each basis function
     # evaluated at x = 0.25
     opb = factory.createOperationB()
     opb.mult(beta, p, alpha)

     self.failUnlessAlmostEqual(alpha[0], 0.75)
     self.failUnlessAlmostEqual(alpha[1], 0.25)
     self.failUnlessAlmostEqual(alpha[2], 0.5)
     self.failUnlessAlmostEqual(alpha[3], 1.0)
     self.failUnlessAlmostEqual(alpha[4], 0.0)

     # transpose direction: only basis function 2 active
     alpha.setAll(0.0)
     alpha[2] = 1.0

     p.set(0,0, 0.25)

     beta[0] = 0.0

     # beta = B^T * alpha: evaluates the single active basis function at x
     opb.multTranspose(alpha, p, beta)
     self.failUnlessAlmostEqual(beta[0], 0.5)
示例#2
0
    def currentDiagHess(self, params):
        """Approximate the diagonal of the Hessian.

        For each basis function i, the i-th unit coefficient vector is
        evaluated at the last-seen data points; the squared l2 norm of the
        result, divided by the number of points, becomes the i-th diagonal
        entry. The result is cached in self.H.

        @param params: not used by this implementation (kept for interface
                       compatibility)
        @return: numpy array of shape (1, grid size)
        """
        # NOTE(review): an earlier implementation based on
        # createOperationLTwoDotProduct was left here commented out; removed
        # for readability (see version control history if needed).
        size = self._lastseen.shape[0]
        data_matrix = DataMatrix(self._lastseen[:,:self.dim])
        mult_eval = createOperationMultipleEval(self.grid, data_matrix);
        params_DV = DataVector(self.grid.getSize())
        params_DV.setAll(0.)
        results_DV = DataVector(size)
        self.H = np.zeros(self.grid.getSize())
        for i in xrange(self.grid.getSize()):
            # evaluate the i-th unit basis vector at all data points
            params_DV[i] = 1.0
            mult_eval.mult(params_DV, results_DV);
            self.H[i] = results_DV.l2Norm()**2
            params_DV[i] = 0.0
        # normalize by the number of data points and reshape to a row vector
        self.H = self.H.reshape(1,-1)/size
        return self.H
示例#3
0
def generateLaplaceMatrix(factory, level, verbose=False):
    """Assemble the dense Laplace (stiffness) matrix of a grid.

    Column i is obtained by applying the Laplace operator to the i-th unit
    coefficient vector.

    @param factory: pysgpp Grid providing storage, generator and operation
    @param level: level of the regular grid to generate
    @param verbose: if True, print each column and its sum
    @return: the assembled matrix (storage.size() x storage.size())
    """
    from pysgpp import DataVector
    storage = factory.getStorage()
    
    gen = factory.createGridGenerator()
    gen.regular(level)
    
    laplace = factory.createOperationLaplace()
    
    # create vector
    alpha = DataVector(storage.size())
    erg = DataVector(storage.size())

    # create stiffness matrix
    # NOTE(review): relies on the legacy two-argument
    # DataVector(rows, cols) matrix-like constructor plus setColumn --
    # confirm against the pysgpp version in use.
    m = DataVector(storage.size(), storage.size())
    m.setAll(0)
    for i in xrange(storage.size()):
        # apply unit vectors
        alpha.setAll(0)
        alpha[i] = 1
        laplace.mult(alpha, erg)
        if verbose:
            print erg, erg.sum()
        m.setColumn(i, erg)

    return m
    def mean(self, grid, alpha, U, T):
        r"""
        Extract the expectation of the given sparse grid function by
        integrating the product of function value and pdf:

        \int\limits_{[0, 1]^d} f_N(x) * pdf(x) dx

        @param grid: sparse grid
        @param alpha: surplus vector of the sparse grid function
        @param U: distribution
        @param T: transformation set
        @return: tuple (vol * moment, err) -- estimated expectation and the
                 accumulated quadrature error
        """
        # extract correct pdf for moment estimation
        vol, W = self._extractPDFforMomentEstimation(U, T)
        D = T.getTransformations()
        # compute the integral of the product
        gs = grid.getStorage()
        acc = DataVector(gs.size())
        acc.setAll(1.)
        tmp = DataVector(gs.size())
        err = 0
        # run over all dimensions
        for i, dims in enumerate(W.getTupleIndices()):
            dist = W[i]
            trans = D[i]

            # get the objects needed for integration the current dimensions
            gpsi, basisi = project(grid, dims)

            if isinstance(dist, SGDEdist):
                # if the distribution is given as a sparse grid function we
                # need to compute the bilinear form of the grids
                # accumulate objects needed for computing the bilinear form
                gpsj, basisj = project(dist.grid, range(len(dims)))

                # compute the bilinear form
                bf = BilinearGaussQuadratureStrategy()
                A, erri = bf.computeBilinearFormByList(gpsi, basisi,
                                                       gpsj, basisj)
                # weight it with the coefficient of the density function
                self.mult(A, dist.alpha, tmp)
            else:
                # the distribution is given analytically, handle them
                # analytically in the integration of the basis functions
                if isinstance(dist, Dist) and len(dims) > 1:
                    raise AttributeError('analytic quadrature not supported for multivariate distributions')
                if isinstance(dist, Dist):
                    dist = [dist]
                    trans = [trans]

                lf = LinearGaussQuadratureStrategy(dist, trans)
                # NOTE: this rebinds tmp to a fresh vector instead of
                # writing into the preallocated one
                tmp, erri = lf.computeLinearFormByList(gpsi, basisi)

            # accumulate the error, scaled by the transformation volume
            err += D[i].vol() * erri

            # accumulate the result
            acc.componentwise_mult(tmp)

        moment = alpha.dotProduct(acc)
        return vol * moment, err
 def computeTrilinearFormByRow(self, gpsk, basisk, gpi, basisi, gpj,
                               basisj):
     """
     Compute the trilinear form of two grid points with a list of grid
     points, multiplying the per-dimension contributions together.

     @param gpsk: list of HashGridIndex
     @param basisk: SG++ Basis for grid indices k
     @param gpi: HashGridIndex
     @param basisi: SG++ Basis for grid indices i
     @param gpj: HashGridIndex
     @param basisj: SG++ Basis for grid indices j
     @return: tuple (DataVector of products, accumulated error)
     """
     result = DataVector(len(gpsk))
     result.setAll(1.0)
     total_err = 0.
     # one product over all dimensions per entry in gpsk
     for row, gpk in enumerate(gpsk):
         for d in xrange(gpi.dim()):
             entry, entry_err = self.getTrilinearFormEntry(
                 gpk, basisk, gpi, basisi, gpj, basisj, d)
             result[row] *= entry
             total_err += entry_err
     return result, total_err
    def __computeRanking(self, v, w, A, b):
        r"""
        Compute the ranking for variance estimation:

        \argmax_{i \in \A} | w (2 Av + wb) |

        Note: both w and b are modified in place.

        @param v: DataVector, coefficients of known grid points
        @param w: DataVector, estimated coefficients of unknown grid points
        @param A: DataMatrix, stiffness matrix
        @param b: DataVector, squared expectation value contribution
        @return: numpy array, contains the ranking for the given samples
        """
        nrows = A.getNrows()
        ncols = A.getNcols()
        av = DataVector(nrows)
        av.setAll(0.0)
        # dense matrix-vector product: av = A * v
        for r in xrange(nrows):
            rowsum = 0.0
            for c in xrange(ncols):
                rowsum += A.get(r, c) * v[c]
            av[r] = rowsum
        av.mult(2.)                # 2 Av
        b.componentwise_mult(w)    # w b
        av.add(b)                  # 2 Av + w b
        w.componentwise_mult(av)   # w (2 Av + w b)
        w.abs()                    # | w (2 Av + w b) |

        return w.array()
    def estimate(self, vol, grid, alpha, f, U, T):
        r"""
        Extract the expectation of the given sparse grid function by
        discretizing the product of function value and pdf and
        integrating it:

        \int\limits_{[0, 1]^d} f(x) * pdf(x) dx

        @param vol: NOTE(review): this argument is never used -- it is
                    overwritten by the volume returned from the pdf
                    extraction below
        @param grid: sparse grid
        @param alpha: surplus vector
        @param f: function to discretize
        @param U: distribution
        @param T: transformation set
        @return: tuple (vol * moment, combined discretization + pdf error)
        """
        # first: discretize f
        fgrid, falpha, discError = discretize(grid, alpha, f, self.__epsilon,
                                              self.__refnums, self.__pointsNum,
                                              self.level, self.__deg, True)
        # extract correct pdf for moment estimation
        vol, W, pdfError = self.__extractDiscretePDFforMomentEstimation(U, T)
        D = T.getTransformations()

        # compute the integral of the product
        gs = fgrid.getStorage()
        acc = DataVector(gs.size())
        acc.setAll(1.)
        tmp = DataVector(gs.size())
        for i, dims in enumerate(W.getTupleIndices()):
            sgdeDist = W[i]
            # accumulate objects needed for computing the bilinear form
            gpsi, basisi = project(fgrid, dims)
            gpsj, basisj = project(sgdeDist.grid, range(len(dims)))
            A = self.__computeMassMatrix(gpsi, basisi, gpsj, basisj, W, D)
            # A = ScipyQuadratureStrategy(W, D).computeBilinearForm(fgrid)
            self.mult(A, sgdeDist.alpha, tmp)
            acc.componentwise_mult(tmp)

        moment = falpha.dotProduct(acc)
        return vol * moment, discError[1] + pdfError
示例#8
0
    def computeMoments(self, ts=None):
        """Compute mean and variance (with discretization errors) for each
        time step and collect them into a matrix.

        Fix: the ts argument was previously ignored and always overwritten
        with all sampled time steps; it is now honored, falling back to all
        available time steps only when None (matching the sibling
        implementations).

        @param ts: time steps to evaluate; defaults to all sampled ones
        @return: dict with 'data' (DataMatrix, one row per time step) and
                 'names' (column labels)
        """
        names = ['time',
                 'iteration',
                 'grid_size',
                 'mean',
                 'meanDiscretizationError',
                 'var',
                 'varDiscretizationError']
        # parameters: fall back to every available time step
        if ts is None:
            ts = self.__samples.keys()
        nrows = len(ts)
        ncols = len(names)
        data = DataMatrix(nrows, ncols)
        v = DataVector(ncols)

        row = 0
        for t in ts:
            v.setAll(0.0)
            v[0] = t
            v[1] = 0  # iteration is always 0 here
            v[2] = len(self.__samples[t].values())
            v[3], v[4] = self.mean(ts=[t])
            v[5], v[6] = self.var(ts=[t])

            # write results to matrix
            data.setRow(row, v)
            row += 1

        return {'data': data,
                'names': names}
    def var(self, grid, alpha, U, T, mean):
        r"""
        Estimate the variance using Monte-Carlo:

        \frac{1}{N}\sum\limits_{i = 1}^N (f_N(x_i) - E(f))^2

        where x_i \in \Gamma

        @param grid: sparse grid
        @param alpha: surplus vector
        @param U: distribution
        @param T: transformation set
        @param mean: previously estimated expectation E(f)
        @return: tuple (variance estimate averaged over all paths, err)
        """
        # init
        _, W = self._extractPDFforMomentEstimation(U, T)
        moments = np.zeros(self.__npaths)
        vecMean = DataVector(self.__n)
        vecMean.setAll(mean)
        for i in xrange(self.__npaths):
            samples = self.__getSamples(W, T, self.__n)
            res = evalSGFunctionMulti(grid, alpha, samples)
            # (f_N(x_i) - mean)^2, computed in place on the result vector
            res.sub(vecMean)
            res.sqr()

            # compute the moment (unbiased estimator, hence n - 1)
            moments[i] = res.sum() / (len(res) - 1.)

        # error statistics
        # NOTE(review): no error estimate is computed; np.Inf is a
        # placeholder (that alias was removed in NumPy 2.0 -- use np.inf)
        err = np.Inf

        # calculate moment
        return np.sum(moments) / self.__npaths, err
示例#10
0
    def computeMoments(self, ts=None):
        """Compute mean and variance with bootstrapping confidence
        intervals for every sampled time step.

        Fixes: the variance confidence interval was written to v[8]/v[9],
        clobbering 'var_err' and leaving the last column
        ('varConfidenceIntervalBootstrapping_upper') unset; it belongs in
        v[9]/v[10] (as in the sibling implementation). The ts argument is
        now honored instead of being overwritten unconditionally.

        @param ts: time steps to evaluate; defaults to all sampled ones
        @return: dict with 'data' (DataMatrix, one row per time step) and
                 'names' (column labels)
        """
        names = [
            'time', 'iteration', 'grid_size', 'mean', 'mean_err',
            'meanConfidenceIntervalBootstrapping_lower',
            'meanConfidenceIntervalBootstrapping_upper', 'var', 'var_err',
            'varConfidenceIntervalBootstrapping_lower',
            'varConfidenceIntervalBootstrapping_upper'
        ]
        # parameters: fall back to every available time step
        if ts is None:
            ts = list(self.__samples.keys())
        nrows = len(ts)
        ncols = len(names)
        data = DataMatrix(nrows, ncols)
        v = DataVector(ncols)

        row = 0
        for t in np.sort(ts):
            v.setAll(0.0)
            mean = self.mean(ts=[t], iterations=[0])
            var = self.var(ts=[t], iterations=[0])
            numSamples = len(list(self.__samples[t].values()))

            v[0] = t
            v[1] = 0
            v[2] = numSamples
            v[3], v[4] = mean["value"], mean["err"]
            v[5], v[6] = mean["confidence_interval"]
            v[7], v[8] = var["value"], var["err"]
            v[9], v[10] = var["confidence_interval"]

            # write results to matrix
            data.setRow(row, v)
            row += 1

        return {'data': data, 'names': names}
示例#11
0
    def naive_calc_single(self, index):
        """Compute the refinement indicator for a single grid point:
        (B^T errors)[seq]^2 divided by the squared column norm of the
        evaluation matrix for that point (0 if the norm vanishes)."""
        numData = self.trainData.getNrows()
        numCoeff = self.grid.getSize()
        seq = self.grid.getStorage().seq(index)

        # numerator: squared entry of B^T * errors at this grid point
        tmp = DataVector(numCoeff)
        self.multEval.multTranspose(self.errors, tmp)
        num = tmp[seq] ** 2

        # denominator: squared norm of the eval-matrix column belonging to
        # this grid point (obtained by evaluating the unit surplus vector)
        unit = DataVector(numCoeff)
        unit.setAll(0.0)
        unit[seq] = 1.0

        col = DataVector(numData)
        self.multEval.mult(unit, col)
        col.sqr()
        denom = col.sum()

        if denom == 0:
            # degenerate column: indicator defined as zero
            return 0
        return num / denom
 def computeTrilinearFormByRow(self,
                               gpsk, basisk,
                               gpi, basisi,
                               gpj, basisj):
     """
     Compute the trilinear form of two grid points with a list of grid
     points, multiplying the per-dimension contributions together.

     @param gpsk: list of HashGridIndex
     @param basisk: SG++ Basis for grid indices k
     @param gpi: HashGridIndex
     @param basisi: SG++ Basis for grid indices i
     @param gpj: HashGridIndex
     @param basisj: SG++ Basis for grid indices j
     @return: tuple (DataVector of products, accumulated error)
     """
     entries = DataVector(len(gpsk))
     entries.setAll(1.0)
     accumulated_err = 0.
     # build the product over all dimensions for every k
     for idx in xrange(len(gpsk)):
         gpk = gpsk[idx]
         for dim in xrange(gpi.dim()):
             val, local_err = self.getTrilinearFormEntry(gpk, basisk,
                                                         gpi, basisi,
                                                         gpj, basisj,
                                                         dim)
             entries[idx] *= val
             accumulated_err += local_err
     return entries, accumulated_err
示例#13
0
    def testOperationTest_test(self):
        """OperationTest on a 1D level-1 linear boundary grid: one data
        point at x = 0.25 with class label +1 counts as correctly
        classified iff the evaluated function value is positive."""
        from pysgpp import Grid, DataVector, DataMatrix

        factory = Grid.createLinearBoundaryGrid(1)
        gen = factory.createGridGenerator()
        gen.regular(1)

        alpha = DataVector(factory.getStorage().size())

        data = DataMatrix(1,1)
        data.setAll(0.25)
        classes = DataVector(1)
        classes.setAll(1.0)

        testOP = factory.createOperationTest()

        # positive surplus on the inner point -> positive function value
        alpha[0] = 0.0
        alpha[1] = 0.0
        alpha[2] = 1.0

        c = testOP.test(alpha, data, classes)
        self.failUnless(c > 0.0)

        # negative surplus -> misclassified, zero correctly tested points
        alpha[0] = 0.0
        alpha[1] = 0.0
        alpha[2] = -1.0
        c = testOP.test(alpha, data, classes)
        self.failUnless(c == 0.0)
    def test_1(self):
        """Check that the functor values match a manual reconstruction of
        the weighted-error accumulator, over two consecutive passes (the
        second pass verifies the accumulation of the first).

        Fix: the two passes were verbatim-duplicated code; they are now a
        local helper. Dunder calls (__call__, __getitem__, __setitem__)
        were replaced by the equivalent operators.
        """
        storage = self.grid.getStorage()
        num_coeff = len(self.alpha)
        opEval = createOperationEval(self.grid)

        def run_pass():
            # functor values must be read BEFORE mutating self.accum below,
            # matching the original evaluation order
            values = [self.functor(storage, i)
                      for i in range(storage.getSize())]
            expect = []
            for j in range(num_coeff):
                row = DataVector(DIM)

                # unit surplus vector for basis function j
                tmp_alpha = DataVector(num_coeff)
                tmp_alpha.setAll(0.0)
                tmp_alpha[j] = 1.0

                # accumulate the squared weighted residual over all samples
                current = 0
                for i in range(self.trainData.getNrows()):
                    self.trainData.getRow(i, row)
                    current += (self.errors[i]
                                * opEval.eval(tmp_alpha, row)) ** 2

                # exponential smoothing of the indicator
                self.accum[j] = (self.accum[j] * (1 - BETA)
                                 + BETA * current * abs(self.alpha[j]))
                expect.append(self.accum[j])

            self.assertEqual(values, expect)

        # first part
        run_pass()
        # second part: verifies accumulation on top of the first pass
        run_pass()
 def setUp(self):
     """Create a regular level-3 2D linear grid (17 points) and coarsen it
     after zeroing the surpluses of four points."""
     self.grid = Grid.createLinearGrid(2)
     self.grid.createGridGenerator().regular(3)
     self.HashGridStorage = self.grid.getStorage()
     surpluses = DataVector(self.grid.getSize())
     surpluses.setAll(1.0)
     # mark four points as removable (zero surplus)
     for idx in (9, 10, 11, 12):
         surpluses[idx] = 0.0
     functor = SurplusCoarseningFunctor(surpluses, 4, 0.5)
     self.grid.createGridGenerator().coarsen(functor, surpluses)
示例#16
0
File: ASGCAnalysis.py  Project: SGpp/SGpp
    def writeSensitivityValues(self, filename):
        """Write total effects and Sobol indices for every time step to an
        ARFF stats file.

        Fix: a leftover `import ipdb; ipdb.set_trace()` debugger trap
        (triggered whenever the number of Sobol indices differed from the
        precomputed 2^d - 1) was removed; the counts are simply resynced
        from the decomposition instead.

        @param filename: base name; ".sa.stats.arff" is appended
        """
        def keymap(key):
            # map a tuple of parameter indices to a comma-separated name list
            names = self.__uqManager.getParameters().activeParams().getNames()
            ans = [names[i] for i in key]
            return ",".join(ans)

        # parameters
        ts = self.__knowledge.getAvailableTimeSteps()
        gs = self.__knowledge.getGrid(self._qoi).getStorage()

        n = len(ts)
        n1 = gs.getDimension()
        n2 = 2**n1 - 1
        data = DataMatrix(n, n1 + n2 + 1)
        names = ['time'] + [None] * (n1 + n2)

        for k, t in enumerate(ts):
            # estimated anova decomposition
            anova = self.getAnovaDecomposition(t=t)
            me = anova.getSobolIndices()

            # resync counts with what the decomposition actually yields
            n2 = len(me)
            te = anova.getTotalEffects()
            n1 = len(te)

            v = DataVector(n1 + n2 + 1)
            v.setAll(0.0)
            v[0] = t

            # total effects first ...
            for i, key in enumerate(
                    anova.getSortedPermutations(list(te.keys()))):
                v[i + 1] = te[key]
                if k == 0:
                    names[i + 1] = '"$T_{' + keymap(key) + '}$"'

            # ... then the Sobol indices
            for i, key in enumerate(
                    anova.getSortedPermutations(list(me.keys()))):
                v[n1 + i + 1] = me[key]

                if k == 0:
                    names[n1 + 1 + i] = '"$S_{' + keymap(key) + '}$"'

            data.setRow(k, v)

        writeDataARFF({
            'filename': filename + ".sa.stats.arff",
            'data': data,
            'names': names
        })
示例#17
0
 def setUp(self):
     """Build a 42 x 5 DataContainer whose row i is a constant vector of
     value i, keeping the row vectors in self.vectors."""
     self.size = 42
     self.dim = 5
     self.container = DataContainer(size=self.size, dim=self.dim)
     values = self.container.getValues()
     points = self.container.getPoints()
     self.vectors = []
     row = 0
     while row < self.size:
         vec = DataVector(self.dim)
         vec.setAll(row)
         self.vectors.append(vec)
         points.setRow(row, vec)
         values[row] = row
         row += 1
 def setUp(self):
     """Build a 42 x 5 DataContainer whose row i is a constant vector of
     value i, keeping the row vectors in self.vectors."""
     self.size = 42
     self.dim = 5
     self.container = DataContainer(size=self.size, dim=self.dim)
     values = self.container.getValues()
     points = self.container.getPoints()
     self.vectors = []
     for idx in range(self.size):
         vec = DataVector(self.dim)
         vec.setAll(idx)
         points.setRow(idx, vec)
         values[idx] = idx
         self.vectors.append(vec)
 def test_InconsistentRefinement1Point(self):
     """Dimensionally adaptive refinement using surplus coefficients as
     local error indicator and inconsistent hash refinement.
     """
     surpluses = DataVector(self.grid.getSize())
     surpluses.setAll(1.0)
     # point ((3,7), (1,1)) (middle most right) gets the largest surplus
     surpluses[12] = 2.0

     refiner = HashRefinementInconsistent()
     indicator = SurplusRefinementFunctor(surpluses, 1, 0.0)
     refiner.free_refine(self.HashGridStorage, indicator)

     self.assertEqual(self.grid.getSize(), 17)
示例#20
0
File: ASGCAnalysis.py  Project: SGpp/SGpp
    def computeStats(self, dtype):
        """Collect per-(time step, iteration) learning statistics of the
        UQ manager into a matrix.

        Test errors (columns 6-9) are only filled when test statistics are
        available for every training entry; otherwise those columns keep
        their previous values (v is not reset between rows).

        @param dtype: key into the uqManager stats dictionaries
        @return: dict with 'data' (DataMatrix nrows x 11) and 'names'
        """
        names = [
            'time',  # 0
            'iteration',  # 1
            'level',  # 2
            'grid_size',  # 3
            'trainMSE',  # 4
            'trainL2Error',  # 5
            'testMSE',  # 6
            'testL2Error',  # 7
            'testL1Error',  # 8
            'testMaxError',  # 9
            'L2ErrorSurpluses'
        ]  # 10

        knowledge = self.__uqManager.getKnowledge()
        ts = knowledge.getAvailableTimeSteps()
        iterations = knowledge.getAvailableIterations()
        nrows = len(ts) * len(iterations)
        ncols = len(names)
        data = DataMatrix(nrows, ncols)
        v = DataVector(ncols)
        v.setAll(0.)
        row = 0

        for t in ts:
            for iteration in iterations:
                v[0] = t
                v[1] = iteration
                v[2] = self.__uqManager.stats.level[dtype][iteration]
                v[3] = self.__uqManager.stats.numberPoints[dtype][iteration]
                v[4] = self.__uqManager.stats.trainMSE[dtype][t][iteration]
                v[5] = self.__uqManager.stats.trainL2Norm[dtype][t][iteration]
                # only report test errors when a test entry exists for
                # every training entry
                if len(self.__uqManager.stats.testMSE[dtype][t]) == \
                        len(self.__uqManager.stats.trainMSE[dtype][t]):
                    v[6] = self.__uqManager.stats.testMSE[dtype][t][iteration]
                    v[7] = self.__uqManager.stats.testL2Norm[dtype][t][
                        iteration]
                    v[8] = self.__uqManager.stats.testL1Norm[dtype][t][
                        iteration]
                    v[9] = self.__uqManager.stats.testMaxError[dtype][t][
                        iteration]
                v[10] = self.computeL2ErrorSurpluses(self._qoi, t, dtype,
                                                     iteration)
                # write results to matrix
                data.setRow(row, v)
                row += 1
        return {'data': data, 'names': names}
示例#21
0
    def writeSensitivityValues(self, filename):
        """Write total effects and Sobol indices for every time step to an
        ARFF stats file.

        Fix: a leftover `import ipdb; ipdb.set_trace()` debugger trap
        (triggered whenever the number of Sobol indices differed from the
        precomputed 2^d - 1) was removed; the counts are simply resynced
        from the decomposition instead.

        @param filename: base name; ".sa.stats.arff" is appended
        """

        def keymap(key):
            # map a tuple of parameter indices to a comma-separated name list
            names = self.getLearner().getParameters().activeParams().getNames()
            ans = [names[i] for i in key]
            return ",".join(ans)

        # parameters
        ts = self.__knowledge.getAvailableTimeSteps()
        gs = self.__knowledge.getGrid(self._qoi).getStorage()

        n = len(ts)
        n1 = gs.dim()
        n2 = 2 ** n1 - 1
        data = DataMatrix(n, n1 + n2 + 1)
        names = ['time'] + [None] * (n1 + n2)

        for k, t in enumerate(ts):
            # estimated anova decomposition
            anova = self.getAnovaDecomposition(t=t)
            me = anova.getSobolIndices()

            # resync counts with what the decomposition actually yields
            n2 = len(me)
            te = anova.getTotalEffects()
            n1 = len(te)

            v = DataVector(n1 + n2 + 1)
            v.setAll(0.0)
            v[0] = t

            # total effects first ...
            for i, key in enumerate(anova.getSortedPermutations(te.keys())):
                v[i + 1] = te[key]
                if k == 0:
                    names[i + 1] = '"$T_{' + keymap(key) + '}$"'

            # ... then the Sobol indices
            for i, key in enumerate(anova.getSortedPermutations(me.keys())):
                v[n1 + i + 1] = me[key]

                if k == 0:
                    names[n1 + 1 + i] = '"$S_{' + keymap(key) + '}$"'

            data.setRow(k, v)

        writeDataARFF({'filename': filename + ".sa.stats.arff",
                       'data': data,
                       'names': names})
示例#22
0
    def testOperationEval_eval(self):
        """Evaluate a level-1 1D linear boundary grid with all surpluses 1
        at x = 0.25; the function value must be 1.5.

        Fix: the local variable `eval` shadowed the builtin of the same
        name; renamed to `opEval`.
        """
        from pysgpp import Grid, DataVector

        factory = Grid.createLinearBoundaryGrid(1)
        gen = factory.createGridGenerator()
        gen.regular(1)

        alpha = DataVector(factory.getStorage().size())
        alpha.setAll(1.0)

        p = DataVector(1)
        p.setAll(0.25)

        opEval = factory.createOperationEval()

        self.failUnlessAlmostEqual(opEval.eval(alpha, p), 1.5)
示例#23
0
    def testOperationEval_eval(self):
        """Evaluate a level-1 1D linear boundary grid with all surpluses 1
        at x = 0.25; the function value must be 1.5.

        Fix: the local variable `eval` shadowed the builtin of the same
        name; renamed to `opEval`.
        """
        from pysgpp import Grid, DataVector

        factory = Grid.createLinearBoundaryGrid(1)
        gen = factory.createGridGenerator()
        gen.regular(1)

        alpha = DataVector(factory.getStorage().size())
        alpha.setAll(1.0)

        p = DataVector(1)
        p.setAll(0.25)

        opEval = factory.createOperationEval()

        self.failUnlessAlmostEqual(opEval.eval(alpha, p), 1.5)
示例#24
0
File: ASGCAnalysis.py  Project: SGpp/SGpp
    def computeMoments(self, iterations=None, ts=None):
        """Compute mean and variance with bootstrapping confidence
        intervals for every (time step, iteration) combination.

        @param iterations: iterations to evaluate (default: all available)
        @param ts: time steps to evaluate (default: all available)
        @return: dict with 'data' (DataMatrix nrows x 11) and 'names'
        """
        names = [
            'time', 'iteration', 'grid_size', 'mean',
            'meanDiscretizationError',
            'meanConfidenceIntervalBootstrapping_lower',
            'meanConfidenceIntervalBootstrapping_upper', 'var',
            'varDiscretizationError',
            'varConfidenceIntervalBootstrapping_lower',
            'varConfidenceIntervalBootstrapping_upper'
        ]
        # parameters: fall back to everything available
        if ts is None:
            ts = self.__knowledge.getAvailableTimeSteps()
        if iterations is None:
            iterations = self.__knowledge.getAvailableIterations()
        nrows = len(ts) * len(iterations)
        ncols = len(names)
        data = DataMatrix(nrows, ncols)
        v = DataVector(ncols)

        row = 0
        for t in ts:
            for iteration in iterations:
                size = self.__knowledge.getGrid(qoi=self._qoi,
                                                iteration=iteration).getSize()
                mean = self.mean(ts=[t],
                                 iterations=[iteration],
                                 totalNumIterations=len(iterations))
                var = self.var(ts=[t],
                               iterations=[iteration],
                               totalNumIterations=len(iterations))

                v.setAll(0.0)
                v[0] = t
                v[1] = iteration
                v[2] = size
                v[3], v[4] = mean["value"], mean["err"]
                v[5], v[6] = mean["confidence_interval"]
                v[7], v[8] = var["value"], var["err"]
                v[9], v[10] = var["confidence_interval"]

                # write results to matrix
                data.setRow(row, v)
                row += 1

        return {'data': data, 'names': names}
示例#25
0
    def testQuadratureTruncated(self):
        """Quadrature of a single basis function: for each grid point, the
        integral of its basis function must equal 2^-(sum of levels),
        where boundary points count as level 1."""
        def f(x):
            return 1.

        # NOTE(review): the surpluses returned by interpolate are discarded
        # on the next line; only the grid structure is used below
        grid, alpha = interpolate(f, 2, 3)
        alpha = DataVector(grid.getStorage().getSize())

        for ix in range(0, grid.getStorage().getSize()):
            # unit vector: activate only the ix-th basis function
            alpha.setAll(0.0)
            alpha[ix] = 1.
            gp = grid.getStorage().getPoint(ix)

            # accumulated level (boundary level 0 counts as 1)
            accLevel = sum(
                [max(1, gp.getLevel(d)) for d in range(gp.getDimension())])
            self.assertTrue(
                createOperationQuadrature(grid).doQuadrature(
                    alpha) == 2**-accLevel, "%g != %g" %
                (createOperationQuadrature(grid).doQuadrature(alpha), 2**
                 -accLevel))
 def computeLinearFormByList(self, gps, basis):
     """
     Compute the linear form for a list of grid points, multiplying the
     per-dimension contributions together.

     @param gps: list of HashGridIndex
     @param basis: SG++ basis for the grid indices in gps
     @return: tuple (DataVector of products, accumulated error)
     """
     result = DataVector(len(gps))
     result.setAll(1.0)
     total_err = 0.
     # one product over all dimensions per grid point
     for idx, gp in enumerate(gps):
         for d in xrange(gp.dim()):
             val, err_d = self.getLinearFormEntry(gp, basis, d)
             result[idx] *= val
             total_err += err_d
     return result, total_err
 def computeLinearFormByList(self, gps, basis):
     """
     Compute the linear form for a list of grid points, multiplying the
     per-dimension contributions together.

     @param gps: list of HashGridIndex
     @param basis: SG++ basis for the grid indices in gps
     @return: tuple (DataVector of products, accumulated error)
     """
     values = DataVector(len(gps))
     values.setAll(1.0)
     accumulated = 0.
     # build the product over all dimensions for every grid point
     for i in xrange(len(gps)):
         gp = gps[i]
         for dim in xrange(gp.dim()):
             entry, entry_err = self.getLinearFormEntry(gp, basis, dim)
             values[i] *= entry
             accumulated += entry_err
     return values, accumulated
示例#28
0
    def test_freeRefineSubspaceIsotropic(self):
        """Refine the isotropic middle subspace: after GSG refinement the
        grid must have 15 points and no point of level 4 (dim 0) or level
        >= 3 (dim 1)."""
        alpha = DataVector(self.grid.getSize())
        alpha.setAll(1.0)
        # largest surplus on the middle point triggers the refinement
        alpha[12] = 2.
        #refinement  stuff
        refinement = HashRefinement()
        decorator = GSGRefinement(refinement)
        # refine a single grid point each time
        functor = SurplusRefinementFunctor(alpha, 1)
        decorator.freeRefineSubspace(self.HashGridStorage, functor)
        # debug output of the refined grid
        for i in xrange(self.grid.getSize()):
            HashGridIndex = self.HashGridStorage.get(i)
            print i, HashGridIndex.toString()

        self.assertEqual(self.grid.getSize(), 15)

        for i in xrange(self.grid.getSize()):
            HashGridIndex = self.HashGridStorage.get(i)
            # toString() yields a "(level, index, ...)" tuple literal; eval
            # parses it (acceptable in a test, the input is trusted)
            levelIndex = eval(HashGridIndex.toString())
            self.assertFalse(levelIndex[0] == 4 or levelIndex[2] >= 3)
示例#29
0
    def doLearningIteration(self, set):
        """Run one learning iteration: assemble the regularized linear
        system for the given data set and solve for the surplus vector.

        @param set: data container providing getPoints() and getValues()
                    (NOTE(review): the parameter name shadows the builtin
                    `set`; renaming would break keyword callers)
        @return: DataVector alpha with the computed surpluses
        """
        # initialize values
        self.linearSystem = DMSystemMatrix(self.grid, set.getPoints(),
                                           self.specification.getCOperator(),
                                           self.specification.getL())
        size = self.grid.getSize()
        # Reuse data from old alpha vector increasing its dimension
        if self.solver.getReuse() and self.alpha is not None:
            alpha = DataVector(self.alpha)
            alpha.resize(size)
        # Use new alpha vector
        else:
            alpha = DataVector(size)
            alpha.setAll(0.0)
        b = DataVector(size)
        self.linearSystem.generateb(set.getValues(), b)
        # calculates alphas
        self.solver.solve(self.linearSystem, alpha, b, self.solver.getReuse(),
                          False, self.solver.getThreshold())

        return alpha
    def test_freeRefineSubspaceIsotropic(self):
        """Refine the isotropic middle subspace: after GSG refinement the
        grid must have 15 points and no point of level 4 (dim 0) or level
        >= 3 (dim 1)."""
        alpha = DataVector(self.grid.getSize())
        alpha.setAll(1.0)
        # largest surplus on the middle point triggers the refinement
        alpha[12] = 2.
        #refinement  stuff
        refinement = HashRefinement()
        decorator = GSGRefinement(refinement)
        # refine a single grid point each time
        functor = SurplusRefinementFunctor(alpha,1)
        decorator.freeRefineSubspace(self.HashGridStorage,functor)
        # debug output of the refined grid
        for i in xrange(self.grid.getSize()):
            HashGridIndex = self.HashGridStorage.get(i)
            print i, HashGridIndex.toString()

        self.assertEqual(self.grid.getSize(), 15)

        for i in xrange(self.grid.getSize()):
            HashGridIndex = self.HashGridStorage.get(i)
            # toString() yields a "(level, index, ...)" tuple literal; eval
            # parses it (acceptable in a test, the input is trusted)
            levelIndex = eval(HashGridIndex.toString())
            self.assertFalse(levelIndex[0] == 4 or levelIndex[2] >= 3)
示例#31
0
    def test_freeRefineSubspaceIsotropic(self):
        """Refine the isotropic middle subspace: after subspace refinement
        the grid must have 33 points and no point of level 4 in either
        dimension."""
        alpha = DataVector(self.grid.getSize())
        alpha.setAll(1.0)
        # mark the isotropic middle subspace points for refinement
        for i in [13, 14, 15, 16]:
            alpha[i] = 2.
        #refinement  stuff
        refinement = HashRefinement()
        decorator = SubspaceRefinement(refinement)
        # refine a single grid point each time
        functor = SurplusRefinementFunctor(alpha, 1)
        decorator.free_refine(self.HashGridStorage, functor)
        # NOTE(review): this loop only fetches each point; result unused
        for i in range(self.grid.getSize()):
            HashGridPoint = self.HashGridStorage.getPoint(i)

        self.assertEqual(self.grid.getSize(), 33)

        for i in range(self.grid.getSize()):
            HashGridPoint = self.HashGridStorage.getPoint(i)
            # toString() yields a "(level, index, ...)" tuple literal; eval
            # parses it (acceptable in a test, the input is trusted)
            levelIndex = eval(HashGridPoint.toString())
            self.assertFalse(levelIndex[0] == 4 or levelIndex[2] == 4)
示例#32
0
def generateBBTMatrix(factory, training, verbose=False):
    """Build the dense matrix of the grid's operation B applied as
    multTranspose followed by mult (the product named B*B^T here).

    Applies the operator pair to every unit vector e_i; the resulting
    vector is stored as column i.

    @param factory: grid object providing getStorage() and createOperationB()
    @param training: training data set, passed through to operation B
    @param verbose: unused, kept for interface compatibility
    @return: numpy array of shape (storage.getSize(), storage.getSize())
    """
    from pysgpp import DataVector
    storage = factory.getStorage()

    b = factory.createOperationB()

    alpha = DataVector(storage.getSize())
    temp = DataVector(training.getSize())

    erg = DataVector(alpha.getSize())

    # create B matrix
    m = np.zeros((storage.getSize(), storage.getSize()))

    for i in range(storage.getSize()):
        # apply unit vectors
        temp.setAll(0.0)
        erg.setAll(0.0)
        alpha.setAll(0.0)
        alpha[i] = 1.0
        b.multTranspose(alpha, training, temp)
        b.mult(temp, training, erg)

        # erg is column i of the result; the original kept a separate
        # `col` counter that always equalled i, so it was removed
        for j in range(storage.getSize()):
            m[j, i] = erg[j]

    return m
示例#33
0
def interpolate(f, level, dim, gridType=GridType_Linear, deg=2, trans=None):
    """Interpolate the callable f on a regular sparse grid.

    @param f: callable; receives a scalar in 1d, an array-like point otherwise
    @param level: level of the regular sparse grid
    @param dim: number of dimensions
    @param gridType: one of the supported GridType_* constants
    @param deg: polynomial degree (poly grid types only)
    @param trans: optional transformation from unit to probabilistic space
    @return: tuple (grid, alpha) with hierarchical surpluses alpha
    @raise AttributeError: for unsupported grid types
    """
    factories = {
        GridType_PolyBoundary: lambda: Grid.createPolyBoundaryGrid(dim, deg),
        GridType_Poly: lambda: Grid.createPolyGrid(dim, deg),
        GridType_Linear: lambda: Grid.createLinearGrid(dim),
        GridType_LinearBoundary: lambda: Grid.createLinearBoundaryGrid(dim, 1),
    }
    if gridType not in factories:
        raise AttributeError
    grid = factories[gridType]()

    gridStorage = grid.getStorage()

    # create regular grid
    grid.getGenerator().regular(level)

    # nodal values of f at the grid points
    alpha = DataVector(gridStorage.getSize())
    alpha.setAll(0.0)

    coords = DataVector(dim)
    for seq in range(gridStorage.getSize()):
        gridStorage.getCoordinates(gridStorage.getPoint(seq), coords)
        point = coords.array()

        if trans is not None:
            point = trans.unitToProbabilistic(point)

        # in 1d the function receives a scalar, not an array
        if gridStorage.getDimension() == 1:
            point = point[0]
        alpha[seq] = f(point)

    # turn nodal values into hierarchical surpluses (in place)
    createOperationHierarchisation(grid).doHierarchisation(alpha)

    return grid, alpha
示例#34
0
    def test_freeRefineSubspaceAnisotropic(self):
        """Refine Anisotropic subspace (x2)"""
        # mark the four points of the anisotropic target subspace
        alpha = DataVector(self.grid.getSize())
        alpha.setAll(1.0)
        for i in [9, 10, 11, 12]:
            alpha[i] = 2.
        #refinement  stuff
        refinement = HashRefinement()
        decorator = SubspaceRefinement(refinement)
        # refine a single grid point each time
        functor = SurplusRefinementFunctor(alpha, 1)
        decorator.free_refine(self.HashGridStorage, functor)
        # debug output: dump every point of the refined grid
        for i in xrange(self.grid.getSize()):
            HashGridIndex = self.HashGridStorage.get(i)
            print i, HashGridIndex.toString()

        # expected total number of grid points after subspace refinement
        self.assertEqual(self.grid.getSize(), 33)

        # no point may reach level 4 in dimension x0
        # NOTE(review): toString() is assumed to eval to a levels/indices tuple
        for i in xrange(self.grid.getSize()):
            HashGridIndex = self.HashGridStorage.get(i)
            levelIndex = eval(HashGridIndex.toString())
            self.assertFalse(levelIndex[0] == 4)
示例#35
0
    def computeStats(self, dtype):
        """Assemble per-(time step, iteration) learner statistics.

        @param dtype: key into the learner's per-data-set dictionaries
                      (selects which accuracy/count series to read)
        @return: dict with 'data' (DataMatrix, one row per pair) and
                 'names' (the matching column labels)
        """
        names = ['time',
                 'iteration',
                 'level',
                 'grid_size',
                 'trainMSE',
                 'trainL2Error',
                 'testMSE',
                 'testL2Error',
                 'L2ErrorSurpluses']

        ts = self.__learner.getTimeStepsOfInterest()
        iterations = self.__learner.iteration + 1
        nrows = len(ts) * iterations
        ncols = len(names)
        data = DataMatrix(nrows, ncols)
        v = DataVector(ncols)
        v.setAll(0.)
        row = 0
        for t in ts:
            for iteration in xrange(iterations):
                v[0] = t
                v[1] = iteration
                v[2] = self.__learner.level[dtype][iteration]
                v[3] = self.__learner.numberPoints[dtype][iteration]
                v[4] = self.__learner.trainAccuracy[dtype][t][iteration]
                n = self.__learner.trainCount[dtype][t][iteration]
                # column 4 is the MSE; sqrt(MSE * n) recovers the L2 error
                v[5] = float(np.sqrt(v[4] * n))  # == L2 error
                # test columns only when one test entry exists per train entry
                # NOTE(review): v is not reset per row, so when this branch is
                # skipped v[6]/v[7] keep the previous row's values -- confirm
                # whether that carry-over is intended
                if len(self.__learner.testAccuracy[dtype][t]) == \
                        len(self.__learner.trainAccuracy[dtype][t]):
                    v[6] = self.__learner.testAccuracy[dtype][t][iteration]
                    n = self.__learner.testCount[dtype][t][iteration]
                    v[7] = float(np.sqrt(v[6] * n))  # == L2 error
                v[8] = self.computeL2ErrorSurpluses(self._qoi, t,
                                                    dtype, iteration)
                # write results to matrix
                data.setRow(row, v)
                row += 1
        return {'data': data, 'names': names}
    def test_freeRefineSubspaceAnisotropic(self):
        """Refine Anisotropic subspace (x2)"""
        # mark the four points of the anisotropic target subspace
        alpha = DataVector(self.grid.getSize())
        alpha.setAll(1.0)
        for i in [9, 10, 11, 12]:
            alpha[i] = 2.
        #refinement  stuff
        refinement = HashRefinement()
        decorator = SubspaceRefinement(refinement)
        # refine a single grid point each time
        functor = SurplusRefinementFunctor(alpha,1)
        decorator.free_refine(self.HashGridStorage,functor)
        # debug output: dump every point of the refined grid
        for i in xrange(self.grid.getSize()):
            HashGridIndex = self.HashGridStorage.get(i)
            print i, HashGridIndex.toString()

        # expected total number of grid points after subspace refinement
        self.assertEqual(self.grid.getSize(), 33)

        # no point may reach level 4 in dimension x0
        # NOTE(review): toString() is assumed to eval to a levels/indices tuple
        for i in xrange(self.grid.getSize()):
            HashGridIndex = self.HashGridStorage.get(i)
            levelIndex = eval(HashGridIndex.toString())
            self.assertFalse(levelIndex[0] == 4)
示例#37
0
 def testHatRegular1D(self):
     """Laplace operator on a regular level-7 1d linear grid.

     For all-ones surpluses, each entry of L*alpha must equal
     2^(level+1) of the corresponding grid point.
     """
     from pysgpp import Grid, DataVector

     factory = Grid.createLinearGrid(1)
     storage = factory.getStorage()

     gen = factory.createGridGenerator()
     gen.regular(7)

     laplace = factory.createOperationLaplace()

     alpha = DataVector(storage.size())
     result = DataVector(storage.size())

     alpha.setAll(1.0)

     laplace.mult(alpha, result)

     # each result entry depends only on the point's level in dimension 0
     for seq in xrange(storage.size()):
         index = storage.get(seq)
         level, _ = index.get(0)
         self.failUnlessAlmostEqual(result[seq], pow(2.0, level+1))
示例#38
0
def generateBTMatrixPython(factory, training, verbose=False):
    """Build the dense B^T matrix of the grid's operation B.

    Column i is multTranspose applied to the i-th unit vector
    (length: number of training entries).

    @param factory: grid object providing getStorage() and createOperationB()
    @param training: training data set, passed through to operation B
    @param verbose: unused, kept for interface compatibility
    @return: DataMatrix of shape (training.getSize(), storage.getSize())
    """
    from pysgpp import DataVector, DataMatrix
    storage = factory.getStorage()

    b = factory.createOperationB()

    alpha = DataVector(storage.getSize())
    temp = DataVector(training.getSize())

    # create BT matrix -- was mistakenly constructed as a DataVector,
    # which has no (rows, cols) constructor and no setColumn()
    m = DataMatrix(training.getSize(), storage.getSize())

    for i in range(storage.getSize()):
        # apply unit vectors
        temp.setAll(0.0)
        alpha.setAll(0.0)
        alpha[i] = 1.0
        b.multTranspose(alpha, training, temp)

        # sets the column in m
        m.setColumn(i, temp)

    return m
示例#39
0
def generateBTMatrix(factory, training, verbose=False):
    """Build the dense B^T matrix of the grid's operation B.

    Column i is multTranspose applied to the i-th unit vector
    (length: number of training rows).

    @param factory: grid object providing getStorage() and createOperationB()
    @param training: training data (provides getNrows())
    @param verbose: unused, kept for interface compatibility
    @return: DataMatrix of shape (training.getNrows(), storage.size())
    """
    from pysgpp import DataVector, DataMatrix
    storage = factory.getStorage()

    b = factory.createOperationB()

    alpha = DataVector(storage.size())
    temp = DataVector(training.getNrows())

    # create BT matrix
    m = DataMatrix(training.getNrows(), storage.size())

    for i in xrange(storage.size()):
        # apply unit vectors
        temp.setAll(0.0)
        alpha.setAll(0.0)
        alpha[i] = 1.0
        b.multTranspose(alpha, training, temp)

        #Sets the column in m
        m.setColumn(i, temp)

    return m
 def computeBilinearFormByRow(self, gpi, basisi, gpsj, basisj):
     """
     Compute the bilinear form of one grid point with a list
     of grid points
     @param gpi: HashGridIndex
     @param basisi: SG++ Basis for grid indices i
     @param gpsj: list of HashGridIndex
     @param basisj: SG++ Basis for grid indices j
     @return: tuple (DataVector of bilinear form values, accumulated
              error summed over all entries and dimensions)
     """
     b = DataVector(len(gpsj))
     b.setAll(1.0)
     err = 0.
     # run over all entries
     for j, gpj in enumerate(gpsj):
         # run over all dimensions
         for d in xrange(gpi.dim()):
             # compute bilinear form for one entry
             s, erri = self.getBilinearFormEntry(gpi, basisi, gpj, basisj, d)
             # combine different dimensions by multiplying the 1d factors
             b[j] *= s
             err += erri
     return b, err
示例#41
0
    def pdf(self, x):
        """Evaluate the density of self.dist at x.

        @param x: scalar, list, DataVector or DataMatrix of sample points
        @return: float for a single point, else numpy array of densities
        """
        # convert the parameter to the right format
        if isList(x):
            x = DataVector(x)
        elif isNumerical(x):
            x = DataVector([x])

        # NOTE(review): if x is none of the supported types, A and res are
        # never bound and self.dist.pdf below fails with a NameError
        if isinstance(x, DataMatrix):
            A = x
            res = DataVector(A.getNrows())
            res.setAll(0.0)
        elif isinstance(x, DataVector):
            # wrap the single point into a 1-row matrix
            A = DataMatrix(1, len(x))
            A.setRow(0, x)
            res = DataVector(1)
            res.setAll(0)

        self.dist.pdf(A, res)

        # unwrap single-point results to a plain float
        if len(res) == 1:
            return res[0]
        else:
            return res.array()
示例#42
0
    def pdf(self, x):
        """Evaluate the density of self.dist at x.

        @param x: scalar, list, DataVector or DataMatrix of sample points
        @return: float for a single point, else numpy array of densities
        @raise TypeError: if x cannot be converted to a supported format
        """
        # convert the parameter to the right format
        if isList(x):
            x = DataVector(x)
        elif isNumerical(x):
            x = DataVector([x])

        if isinstance(x, DataMatrix):
            A = x
            res = DataVector(A.getNrows())
            res.setAll(0.0)
        elif isinstance(x, DataVector):
            # wrap the single point into a 1-row matrix
            A = DataMatrix(1, len(x))
            A.setRow(0, x)
            res = DataVector(1)
            res.setAll(0)
        else:
            # previously fell through with A/res unbound -> NameError;
            # fail explicitly instead
            raise TypeError("pdf: unsupported input type %s" % type(x))

        self.dist.pdf(A, res)

        # unwrap single-point results to a plain float
        if len(res) == 1:
            return res[0]
        else:
            return res.array()
示例#43
0
def generateBTMatrix(factory, training, verbose=False):
    """Build the dense B^T matrix via the MultipleEval operation.

    Column j is the evaluation operation applied to the j-th unit vector.

    @param factory: grid object providing getStorage()
    @param training: training data (provides getNrows())
    @param verbose: unused, kept for interface compatibility
    @return: DataMatrix of shape (training.getNrows(), storage.getSize())
    """
    from pysgpp import DataVector, DataMatrix
    storage = factory.getStorage()

    eval_op = createOperationMultipleEval(factory, training)

    unit = DataVector(storage.getSize())
    column = DataVector(training.getNrows())

    bt = DataMatrix(training.getNrows(), storage.getSize())

    for j in range(storage.getSize()):
        # apply the operation to the j-th unit vector
        column.setAll(0.0)
        unit.setAll(0.0)
        unit[j] = 1.0
        eval_op.mult(unit, column)

        # store the result as column j
        bt.setColumn(j, column)

    return bt
示例#44
0
 def computeBilinearFormByRow(self, gpi, basisi, gpsj, basisj):
     """
     Compute the bilinear form of one grid point with a list
     of grid points
     @param gpi: HashGridIndex
     @param basisi: SG++ Basis for grid indices i
     @param gpsj: list of HashGridIndex
     @param basisj: SG++ Basis for grid indices j
     @return: tuple (DataVector of bilinear form values, accumulated
              error summed over all entries and dimensions)
     """
     b = DataVector(len(gpsj))
     b.setAll(1.0)
     err = 0.
     # run over all entries
     for j, gpj in enumerate(gpsj):
         # run over all dimensions
         for d in xrange(gpi.dim()):
             # compute bilinear form for one entry
             s, erri = self.getBilinearFormEntry(gpi, basisi, gpj, basisj,
                                                 d)
             # combine different dimensions by multiplying the 1d factors
             b[j] *= s
             err += erri
     return b, err
示例#45
0
    def computeMoments(self, iterations=None, ts=None):
        """Tabulate mean and variance estimates per (time step, iteration).

        @param iterations: iterations to evaluate (default: all available)
        @param ts: time steps to evaluate (default: all available)
        @return: dict with 'data' (DataMatrix, one row per combination)
                 and 'names' (the column labels)
        """
        names = ['time',
                 'iteration',
                 'grid_size',
                 'mean',
                 'meanDiscretizationError',
                 'var',
                 'varDiscretizationError']
        # fall back to everything the knowledge object offers
        if ts is None:
            ts = self.__knowledge.getAvailableTimeSteps()
        if iterations is None:
            iterations = self.__knowledge.getAvailableIterations()

        num_cols = len(names)
        data = DataMatrix(len(ts) * len(iterations), num_cols)
        record = DataVector(num_cols)

        for ti, t in enumerate(ts):
            for ii, iteration in enumerate(iterations):
                grid_size = self.__knowledge.getGrid(
                    qoi=self._qoi, iteration=iteration).getSize()
                record.setAll(0.0)
                record[0] = t
                record[1] = iteration
                record[2] = grid_size
                record[3], record[4] = self.mean(ts=[t],
                                                 iterations=[iteration])
                record[5], record[6] = self.var(ts=[t],
                                                iterations=[iteration])

                # row index within the preallocated matrix
                data.setRow(ti * len(iterations) + ii, record)

        return {'data': data,
                'names': names}
示例#46
0
    def testOperationTest_test(self):
        """OperationTest on a 1d level-1 linear grid with one sample.

        With a +1-labelled sample at x=0.25, all-ones surpluses must score
        positively and all-minus-ones surpluses must score zero.
        """
        from pysgpp import Grid, DataVector, DataMatrix

        factory = Grid.createLinearGrid(1)
        gen = factory.createGridGenerator()
        gen.regular(1)

        alpha = DataVector(factory.getStorage().size())

        # single training sample at x = 0.25 with class label +1
        data = DataMatrix(1, 1)
        data.setAll(0.25)
        classes = DataVector(1)
        classes.setAll(1.0)

        testOP = factory.createOperationTest()

        # positive surpluses -> the sample is counted as correct
        alpha.setAll(1.0)
        c = testOP.test(alpha, data, classes)
        self.failUnless(c > 0.0)

        # negative surpluses -> the sample is not counted
        alpha.setAll(-1.0)
        c = testOP.test(alpha, data, classes)
        self.failUnless(c == 0.0)
示例#47
0
def generateBBTMatrix(factory, training, verbose=False):
    """Build a dense matrix from the MultipleEval operation.

    Column i is the result of applying mult followed by multTranspose
    to the i-th unit vector (the product named B*B^T here).

    @param factory: grid object providing getStorage()
    @param training: training data (provides getNrows())
    @param verbose: unused, kept for interface compatibility
    @return: DataMatrix of shape (storage.size(), storage.size())
    """
    from pysgpp import DataVector, DataMatrix
    storage = factory.getStorage()

    b = createOperationMultipleEval(factory, training)

    alpha = DataVector(storage.size())
    erg = DataVector(len(alpha))
    temp = DataVector(training.getNrows())

    # create B matrix
    m = DataMatrix(storage.size(), storage.size())
    for i in xrange(storage.size()):
        # apply unit vectors
        temp.setAll(0.0)
        erg.setAll(0.0)
        alpha.setAll(0.0)
        alpha[i] = 1.0
        b.mult(alpha, temp)
        b.multTranspose(temp, erg)
        # Sets the column in m
        m.setColumn(i, erg)

    return m
示例#48
0
def generateBBTMatrix(factory, training, verbose=False):
    """Build a dense matrix from the grid's operation B.

    Column i is the result of applying multTranspose followed by mult
    to the i-th unit vector (the product named B*B^T here).

    @param factory: grid object providing getStorage() and createOperationB()
    @param training: training data (provides getNrows())
    @param verbose: unused, kept for interface compatibility
    @return: DataMatrix of shape (storage.size(), storage.size())
    """
    from pysgpp import DataVector, DataMatrix
    storage = factory.getStorage()

    b = factory.createOperationB()

    alpha = DataVector(storage.size())
    erg = DataVector(len(alpha))
    temp = DataVector(training.getNrows())

    # create B matrix
    m = DataMatrix(storage.size(), storage.size())
    for i in xrange(storage.size()):
        # apply unit vectors
        temp.setAll(0.0)
        erg.setAll(0.0)
        alpha.setAll(0.0)
        alpha[i] = 1.0
        b.multTranspose(alpha, training, temp)
        b.mult(temp, training, erg)
        #Sets the column in m
        m.setColumn(i, erg)

    return m
示例#49
0
    def doLearningIteration(self, points):
        """
        Interpolates the given points with the current grid
        @param points: interpolation points; a coordinate-tuple-indexed
                       container of nodal values that provides getDim()
        @return: Return hierarchical surpluses
        """
        gs = self.grid.getStorage()

        # assert that the number of dimensions of the data is the same
        # as the grids
        assert gs.dim() == points.getDim()

        nodalValues = DataVector(gs.size())
        nodalValues.setAll(0.0)

        # interpolation on nodal basis: look up each grid point's sample
        # value; grid points without a sample fall back to 0
        p = DataVector(gs.dim())
        cnt = 0
        for i in xrange(gs.size()):
            gp = gs.get(i)
            gp.getCoords(p)
            x = tuple(p.array())
            if x not in points:
                # # search for 2*d closest grid points
                # q = DataVector(gs.dim())
                # l = np.array([])
                # for j in xrange(gs.size()):
                #     gs.get(j).getCoords(q)
                #     q.sub(p)
                #     l = np.append(l, q.l2Norm())

                # n = min(gs.size(), gs.dim())

                # ixs = np.argsort(l)
                # # nodalValues[i] = np.mean(l[ixs[:n]])
                nodalValues[i] = 0.0
                print p, nodalValues[i]
                cnt += 1
            else:
                nodalValues[i] = float(points[x])

        if cnt > 0:
            print '%i/%i of the grid points have \
                   been set to 0' % (cnt, gs.size())
            # NOTE(review): interactive debug halt left in -- blocks
            # non-interactive runs
            pdb.set_trace()

        # hierarchization
        alpha = hierarchize(self.grid, nodalValues)

        # -----------------------------------------
        # check if interpolation property is given
        #         fig, _ = plotNodal3d(A)
        #         fig.show()
        #         fig, _ = plotSGNodal3d(self.grid, alpha)
        #         fig.show()
        #         fig, _ = plotSG3d(self.grid, alpha)
        #         fig.show()

        err, _ = checkInterpolation(self.grid,
                                    alpha,
                                    nodalValues,
                                    epsilon=1e-12)

        if len(err) > 0:
            print "interpolation property not met"
            # NOTE(review): interactive debug halt left in -- blocks
            # non-interactive runs
            pdb.set_trace()
        # -----------------------------------------

        return alpha
示例#50
0
    def doLearningIteration(self, points):
        """
        Interpolates the given points with the current grid
        @param points: interpolation points; a coordinate-tuple-indexed
                       container of nodal values that provides getDim()
        @return: Return hierarchical surpluses
        """
        gs = self.grid.getStorage()

        # assert that the number of dimensions of the data is the same
        # as the grids
        assert gs.dim() == points.getDim()

        nodalValues = DataVector(gs.size())
        nodalValues.setAll(0.0)

        # interpolation on nodal basis: look up each grid point's sample
        # value; grid points without a sample fall back to 0
        p = DataVector(gs.dim())
        cnt = 0
        for i in xrange(gs.size()):
            gp = gs.get(i)
            gp.getCoords(p)
            x = tuple(p.array())
            if x not in points:
                # # search for 2*d closest grid points
                # q = DataVector(gs.dim())
                # l = np.array([])
                # for j in xrange(gs.size()):
                #     gs.get(j).getCoords(q)
                #     q.sub(p)
                #     l = np.append(l, q.l2Norm())

                # n = min(gs.size(), gs.dim())

                # ixs = np.argsort(l)
                # # nodalValues[i] = np.mean(l[ixs[:n]])
                nodalValues[i] = 0.0
                print p, nodalValues[i]
                cnt += 1
            else:
                nodalValues[i] = float(points[x])

        if cnt > 0:
            print '%i/%i of the grid points have \
                   been set to 0' % (cnt, gs.size())
            # NOTE(review): interactive debug halt left in -- blocks
            # non-interactive runs
            pdb.set_trace()

        # hierarchization
        alpha = hierarchize(self.grid, nodalValues)

        # -----------------------------------------
        # check if interpolation property is given
#         fig, _ = plotNodal3d(A)
#         fig.show()
#         fig, _ = plotSGNodal3d(self.grid, alpha)
#         fig.show()
#         fig, _ = plotSG3d(self.grid, alpha)
#         fig.show()

        err, _ = checkInterpolation(self.grid, alpha, nodalValues, epsilon=1e-12)

        if len(err) > 0:
            print "interpolation property not met"
            # NOTE(review): interactive debug halt left in -- blocks
            # non-interactive runs
            pdb.set_trace()
        # -----------------------------------------

        return alpha
示例#51
0
# create a two-dimensional piecewise bilinear grid
dim = 2
grid = Grid.createLinearGrid(dim)
gridStorage = grid.getStorage()
print "dimensionality:         {}".format(gridStorage.dim())

# create regular grid, level 3
level = 3
gridGen = grid.createGridGenerator()
gridGen.regular(level)
print "number of grid points:  {}".format(gridStorage.size())

# create coefficient vector, initialized to zero
alpha = DataVector(gridStorage.size())
alpha.setAll(0.0)
print "length of alpha vector: {}".format(len(alpha))

# set function values in alpha:
# f(x0, x1) = 16*x0*(1-x0)*x1*(1-x1), maximum 1 at (0.5, 0.5)
f = lambda x0, x1: 16.0 * (x0 - 1.0) * x0 * (x1 - 1.0) * x1
for i in xrange(gridStorage.size()):
    gp = gridStorage.get(i)
    alpha[i] = f(gp.getCoord(0), gp.getCoord(1))
print "alpha before hierarchization: {}".format(alpha)

# hierarchize: convert nodal values into hierarchical surpluses in place
createOperationHierarchisation(grid).doHierarchisation(alpha)
print "alpha after hierarchization:  {}".format(alpha)

# evaluate: container for the evaluation point (snippet continues elsewhere)
p = DataVector(dim)
示例#52
0
# create a two-dimensional piecewise bilinear grid
dim = 2
grid = Grid.createLinearGrid(dim)
gridStorage = grid.getStorage()
print "dimensionality:         {}".format(gridStorage.dim())

# create regular grid, level 3
level = 3
gridGen = grid.createGridGenerator()
gridGen.regular(level)
print "number of grid points:  {}".format(gridStorage.size())

# create coefficient vector, initialized to zero
alpha = DataVector(gridStorage.size())
alpha.setAll(0.0)
print "length of alpha vector: {}".format(len(alpha))

# set function values in alpha:
# f(x0, x1) = 16*x0*(1-x0)*x1*(1-x1), maximum 1 at (0.5, 0.5)
f = lambda x0, x1: 16.0 * (x0-1.0)*x0 * (x1-1.0)*x1
for i in xrange(gridStorage.size()):
    gp = gridStorage.get(i)
    alpha[i] = f(gp.getCoord(0), gp.getCoord(1))
print "alpha before hierarchization: {}".format(alpha)

# hierarchize: convert nodal values into hierarchical surpluses in place
createOperationHierarchisation(grid).doHierarchisation(alpha)
print "alpha after hierarchization:  {}".format(alpha)

# evaluate: container for the evaluation point (snippet continues elsewhere)
p = DataVector(dim)
示例#53
0
def __doMarginalize(grid, alpha, dd, measure=None):
    """Integrate out dimension dd of the sparse grid function (grid, alpha).

    @param grid: source grid, at least 2-dimensional
    @param alpha: hierarchical surpluses on grid
    @param dd: dimension to marginalize
    @param measure: optional ([distributions], [transformations]) pair; if
                    given, the 1d integrals use measure[0][dd]/measure[1][dd]
    @return: (n_grid, n_alpha, err) -- the (dim-1)-dimensional grid, its
             surpluses, and the error of the *last* computed 1d integral
    @raise AttributeError: for grids of dimension < 2 or dd out of range
    """
    gs = grid.getStorage()

    dim = gs.dim()

    if dim < 2:
        raise AttributeError("The grid has to be at least of dimension 2")

    if dd >= dim:
        raise AttributeError("The grid has only %i dimensions, so I can't \
                             integrate over %i" % (dim, dd))

    # create new grid with one dimension less
    n_dim = dim - 1
    n_grid = createGrid(grid, n_dim)
    n_gs = n_grid.getStorage()

    # insert grid points: copy each point, dropping dimension dd and
    # shifting every higher dimension down by one slot
    n_gp = HashGridIndex(n_dim)
    for i in xrange(gs.size()):
        gp = gs.get(i)
        for d in range(dim):
            if d == dd:
                # omit marginalization direction
                continue
            elif d < dd:
                n_gp.set(d, gp.getLevel(d), gp.getIndex(d))
            else:
                n_gp.set(d - 1, gp.getLevel(d), gp.getIndex(d))

        # insert grid point
        if not n_gs.has_key(n_gp):
            n_gs.insert(n_gp)

    n_gs.recalcLeafProperty()

    # create coefficient vector
    n_alpha = DataVector(n_gs.size())
    n_alpha.setAll(0.0)

    # set function values for n_alpha
    for i in xrange(gs.size()):
        gp = gs.get(i)

        for d in range(dim):
            if d == dd:
                # remember the 1d basis in the marginalized direction
                dd_level = gp.getLevel(d)
                dd_index = gp.getIndex(d)
            elif d < dd:
                n_gp.set(d, gp.getLevel(d), gp.getIndex(d))
            else:
                n_gp.set(d - 1, gp.getLevel(d), gp.getIndex(d))

        if not n_gs.has_key(n_gp):
            raise Exception("This should not happen!")

        # compute the integral of the given basis
        if measure is None:
            q, err = getIntegral(grid, dd_level, dd_index), 0.
        else:
            dist, trans = measure[0][dd], measure[1][dd]
            lf = LinearGaussQuadratureStrategy([dist], [trans])
            basis = getBasis(grid)
            gpdd = HashGridIndex(1)
            gpdd.set(0, dd_level, dd_index)
            q, err = lf.computeLinearFormByList([gpdd], basis)
            q = q[0]

        # accumulate the weighted surplus on the projected point
        # NOTE(review): err is overwritten each iteration, so only the last
        # point's quadrature error is returned -- confirm if intentional
        j = n_gs.seq(n_gp)
        n_alpha[j] += alpha[i] * q

    return n_grid, n_alpha, err
示例#54
0
class TestPersistentRefinementOperator(unittest.TestCase):
    """Tests for PersistentErrorRefinementFunctor.

    The functor value for coefficient j is expected to follow the
    exponential average
        accum_j <- accum_j*(1-BETA) + BETA * sum_i(err_i*phi_j(x_i))^2 * |alpha_j|
    across consecutive sweeps over all grid points.
    """

    def setUp(self):

        #
        # Grid
        #

        # regular sparse grid of level LEVEL in DIM dimensions
        self.grid = Grid.createLinearGrid(DIM)
        self.grid_gen = self.grid.createGridGenerator()
        self.grid_gen.regular(LEVEL)

        #
        # trainData, classes, errors
        #

        # regular lattice of training points with spacing DELTA
        xs = []
        DELTA = 0.05
        DELTA_RECI = int(1 / DELTA)

        for i in xrange(DELTA_RECI):
            for j in xrange(DELTA_RECI):
                xs.append([DELTA * i, DELTA * j])

        # fixed seed -> reproducible random class labels in [-10, 10]
        random.seed(1208813)
        ys = [random.randint(-10, 10) for i in xrange(DELTA_RECI**2)]

        # print xs
        # print ys

        self.trainData = DataMatrix(xs)
        self.classes = DataVector(ys)
        self.alpha = DataVector([3, 6, 7, 9, -1])

        # residuals classes[i] - f_alpha(x_i) for the fixed surplus vector
        self.errors = DataVector(DELTA_RECI**2)
        coord = DataVector(DIM)

        for i in xrange(self.trainData.getNrows()):
            self.trainData.getRow(i, coord)
            self.errors.__setitem__(
                i, self.classes[i] - self.grid.eval(self.alpha, coord))

        #
        # Functor
        #

        self.functor = PersistentErrorRefinementFunctor(self.alpha, self.grid)
        self.functor.setTrainDataset(self.trainData)
        self.functor.setClasses(self.classes)
        self.functor.setErrors(self.errors)

        # reference accumulator mirroring the functor's persistent state
        self.accum = DataVector(self.alpha.__len__())
        self.accum.setAll(0.0)

    def test_1(self):
        """Functor values must match the exponential average over two
        consecutive sweeps across all grid points."""
        storage = self.grid.getStorage()
        coord = DataVector(storage.dim())
        num_coeff = self.alpha.__len__()

        #
        # First part
        #

        # actual functor values for every grid point
        values = [
            self.functor.__call__(storage, i) for i in xrange(storage.size())
        ]
        expect = []

        for j in xrange(num_coeff):

            row = DataVector(DIM)

            # unit surplus vector e_j isolates basis function j
            tmp_alpha = DataVector(self.alpha.__len__())
            tmp_alpha.setAll(0.0)
            tmp_alpha.__setitem__(j, 1.0)

            # current = sum_i (error_i * phi_j(x_i))^2
            current = 0
            for i in xrange(self.trainData.getNrows()):
                self.trainData.getRow(i, row)
                current += (self.errors.__getitem__(i) *
                            self.grid.eval(tmp_alpha, row))**2

            # exponential average weighted by |alpha_j|
            self.accum.__setitem__(
                j,
                self.accum.__getitem__(j) * (1 - BETA) +
                BETA * current * abs(self.alpha.__getitem__(j)))
            expect.append(self.accum.__getitem__(j))

        self.assertEqual(values, expect)

        #
        # Second part
        #

        # a second sweep must advance the persistent average the same way
        values = [
            self.functor.__call__(storage, i) for i in xrange(storage.size())
        ]
        expect = []

        for j in xrange(num_coeff):

            row = DataVector(DIM)

            tmp_alpha = DataVector(self.alpha.__len__())
            tmp_alpha.setAll(0.0)
            tmp_alpha.__setitem__(j, 1.0)

            current = 0
            for i in xrange(self.trainData.getNrows()):
                self.trainData.getRow(i, row)
                current += (self.errors.__getitem__(i) *
                            self.grid.eval(tmp_alpha, row))**2

            self.accum.__setitem__(
                j,
                self.accum.__getitem__(j) * (1 - BETA) +
                BETA * current * abs(self.alpha.__getitem__(j)))
            expect.append(self.accum.__getitem__(j))

        self.assertEqual(values, expect)