def __computeRanking(self, v, A, b):
        """
        Compute ranking for variance estimation

        \argmax_{i \in \mathcal{A}} | v_i (2 (Av)_i - v_i b_i) |

        @param v: DataVector, coefficients of known grid points
        @param A: DataMatrix, stiffness matrix
        @param b: DataVector, squared expectation value contribution
        @return: numpy array, contains the ranking for the given samples
        """
        # update the ranking
        av = DataVector(A.getNrows())
        av.setAll(0.0)
        # = Av
        for i in xrange(A.getNrows()):
            for j in xrange(A.getNcols()):
                av[i] += A.get(i, j) * v[j]
        av.mult(2.)  # = 2 * Av
        b.componentwise_mult(v)  # = v * b (note: modifies b in place)
        av.sub(b)  # = 2 * Av - v * b

        w = DataVector(v)
        w.componentwise_mult(av)  # = v * (2 * Av - v * b)
        w.abs()  # = | v * (2 * Av - v * b) |

        return w.array()
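
For reference, the same ranking can be written in plain NumPy. This is a minimal sketch that assumes v, A, and b are ordinary NumPy arrays rather than pysgpp objects (and, unlike above, leaves b untouched):

import numpy as np

def compute_ranking_numpy(v, A, b):
    # componentwise ranking: | v * (2 * Av - v * b) |
    av = A.dot(v)  # = Av
    return np.abs(v * (2.0 * av - v * b))
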
    def testOps(self):
        from pysgpp import DataVector
        # sum
        self.assertAlmostEqual(self.d_rand.sum(), sum(self.l_rand_total))

        # sqr
        d = DataVector(self.d_rand)
        d.sqr()
        for i in xrange(self.N):
            self.assertEqual(self.d_rand[i]**2, d[i])

        # abs
        d = DataVector(self.d_rand)
        d.abs()
        for i in xrange(self.N):
            self.assertEqual(abs(self.d_rand[i]), d[i])

        # componentwise_mult
        d = DataVector(self.d_rand)
        # d2 = DataVector(self.nrows, self.ncols)
        d2 = DataVector(self.N)
        for i in xrange(self.N):
            d2[i] = i
        d.componentwise_mult(d2)
        for i in xrange(self.N):
            self.assertEqual(self.d_rand[i] * i, d[i])

        # componentwise_div
        d = DataVector(self.d_rand)
        for i in xrange(self.N):
            d2[i] = i + 1
        d.componentwise_div(d2)
        for i in xrange(self.N):
            self.assertEqual(self.d_rand[i] / (i + 1), d[i])
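
A minimal standalone sketch of the DataVector operations exercised by this test, assuming pysgpp is importable; the values are arbitrary:

from pysgpp import DataVector

v = DataVector(4)
for i in range(4):
    v[i] = i - 1.5            # -1.5, -0.5, 0.5, 1.5

w = DataVector(v)             # copy; v stays untouched
w.abs()                       # componentwise absolute value
w.sqr()                       # componentwise square
w.componentwise_mult(v)       # componentwise product with v
w.componentwise_div(v)        # componentwise division by v
print(w.sum(), w.array())
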
def computeErrors(jgrid, jalpha,
                  grid1, alpha1,
                  grid2, alpha2,
                  n=200):
    """
    Compute some errors to estimate the quality of the
    interpolation.
    @param jgrid: Grid, new discretization
    @param jalpha: DataVector, new surpluses
    @param grid1: Grid, old discretization
    @param alpha1: DataVector, old surpluses
    @param grid2: Grid, old discretization
    @param alpha2: DataVector, old surpluses
    @return: tuple(<float>, <float>), maxdrift, l2norm
    """
    jgs = jgrid.getStorage()

    # create control samples
    samples = DataMatrix(np.random.rand(n, jgs.getDimension()))

    # evaluate the sparse grid functions
    jnodalValues = evalSGFunctionMulti(jgrid, jalpha, samples)

    # eval grids
    nodalValues1 = evalSGFunctionMulti(grid1, alpha1, samples)
    nodalValues2 = evalSGFunctionMulti(grid2, alpha2, samples)

    # compute errors
    p = DataVector(jgs.getDimension())
    err = DataVector(n)
    for i in range(n):
        samples.getRow(i, p)
        y = nodalValues1[i] * nodalValues2[i]
        if abs(jnodalValues[i]) > 1e100:
            # ignore blown-up evaluations of the joint grid
            err[i] = 0.0
        else:
            err[i] = abs(y - jnodalValues[i])

    # get error statistics
    # l2
    l2norm = err.l2Norm()
    # maxdrift
    err.abs()
    maxdrift = err.max()

    return maxdrift, l2norm
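
The error statistics at the end can be mirrored in plain NumPy. A sketch, assuming the three nodal value arrays are plain NumPy arrays (e.g. obtained via .array()):

import numpy as np

def error_stats(jnodalValues, nodalValues1, nodalValues2):
    # componentwise error between the joint grid and the product of the two old grids
    err = np.abs(nodalValues1 * nodalValues2 - jnodalValues)
    # discard blown-up evaluations of the joint grid, as in the loop above
    err[np.abs(jnodalValues) > 1e100] = 0.0
    l2norm = np.sqrt(np.sum(err ** 2))   # Euclidean norm, matching DataVector.l2Norm()
    maxdrift = np.max(err)
    return maxdrift, l2norm
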
def computeErrors(jgrid, jalpha, grid, alpha, f, n=200):
    """
    Compute some errors to estimate the quality of the
    interpolation.
    @param jgrid: Grid, new discretization
    @param jalpha: DataVector, new surpluses
    @param grid: Grid, old discretization
    @param alpha: DataVector, old surpluses
    @param f: function, to be interpolated
    @param n: int, number of Monte Carlo estimates for error estimation
    @return: tuple(<float>, <float>), maxdrift, l2norm
    """
    jgs = jgrid.getStorage()

    # create control samples
    samples = DataMatrix(np.random.rand(n, jgs.dim()))

    # evaluate the sparse grid functions
    jnodalValues = evalSGFunctionMulti(jgrid, jalpha, samples)
    nodalValues = evalSGFunctionMulti(grid, alpha, samples)

    # compute errors
    p = DataVector(jgs.dim())
    err = DataVector(n)
    for i in xrange(n):
        samples.getRow(i, p)
        y = f(p.array(), nodalValues[i])
        err[i] = abs(y - jnodalValues[i])

    # get error statistics
    # l2
    l2norm = err.l2Norm()
    # maxdrift
    err.abs()
    maxdrift = err.max()

    return maxdrift, l2norm
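
The callback f maps a sample point and the old sparse grid function's value at that point to the reference value the new interpolant is compared against. A hypothetical usage sketch (jgrid, jalpha, grid, and alpha are assumed to be set up elsewhere):

# hypothetical reference: the new grid is supposed to interpolate the
# square of the old sparse grid function
def f(p, nodalValue):
    return nodalValue ** 2

maxdrift, l2norm = computeErrors(jgrid, jalpha, grid, alpha, f, n=500)
print("max drift = %g, l2 error = %g" % (maxdrift, l2norm))
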