Example #1
def refineGrid(grid, alpha, f, refnums):
    """
    This function refines a sparse grid function refnums times.

    Arguments:
    grid -- Grid sparse grid from pysgpp
    alpha -- DataVector coefficient vector
    f -- function to be interpolated
    refnums -- int number of refinement steps

    Return nothing
    """
    gs = grid.getStorage()
    gridGen = grid.getGenerator()
    x = DataVector(gs.getDimension())
    for _ in range(refnums):
        # refine a single grid point each time
        gridGen.refine(SurplusRefinementFunctor(alpha, 1))

        # extend alpha vector (new entries are set to zero)
        alpha.resizeZero(gs.getSize())

        # set function values in alpha
        for i in range(gs.getSize()):
            gs.getCoordinates(gs.getPoint(i), x)
            alpha[i] = f(x)

        # hierarchize
        createOperationHierarchisation(grid).doHierarchisation(alpha)
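A minimal usage sketch for refineGrid, assuming the pysgpp names used above (Grid, DataVector, createOperationHierarchisation, SurplusRefinementFunctor) and a hypothetical test function; the initial surpluses are computed first so that SurplusRefinementFunctor has meaningful values to rank.

from pysgpp import (Grid, DataVector, createOperationHierarchisation,
                    SurplusRefinementFunctor)

def f(x):
    # hypothetical test function on [0, 1]^2
    return 16.0 * x[0] * (1 - x[0]) * x[1] * (1 - x[1])

dim, level = 2, 3
grid = Grid.createLinearGrid(dim)
grid.getGenerator().regular(level)
gs = grid.getStorage()

# initial interpolation: nodal values at the grid points, then hierarchize
alpha = DataVector(gs.getSize())
x = DataVector(dim)
for i in range(gs.getSize()):
    gs.getCoordinates(gs.getPoint(i), x)
    alpha[i] = f(x)
createOperationHierarchisation(grid).doHierarchisation(alpha)

# three refinement steps, one grid point each
refineGrid(grid, alpha, f, 3)
print("grid size after refinement:", gs.getSize())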
Example #2
def hierarchize(grid, nodalValues, ignore=None):
    try:
        # if ignore is None or len(ignore) > 0:
        alpha = DataVector(nodalValues)
        createOperationHierarchisation(grid).doHierarchisation(alpha)
        return alpha
#         print "using brute force hierarchization"
#         return hierarchizeBruteForce(grid, nodalValues, ignore)
    except Exception as e:
        print(e)
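A short usage sketch for the helper above, assuming pysgpp's Grid and DataVector as in the other examples; the grid, level, and test function are illustrative only.

from pysgpp import Grid, DataVector

dim, level = 2, 3
grid = Grid.createLinearGrid(dim)
grid.getGenerator().regular(level)
gs = grid.getStorage()

# nodal values: evaluate the target function at every grid point
nodalValues = DataVector(gs.getSize())
p = DataVector(dim)
for i in range(gs.getSize()):
    gs.getCoordinates(gs.getPoint(i), p)
    nodalValues[i] = p[0] * p[1]

alpha = hierarchize(grid, nodalValues)  # hierarchical surpluses as a DataVector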
Example #4
def addConst(grid, alpha, c, y):
    alpha_vec = DataVector(alpha)
    opHier = createOperationHierarchisation(grid)
    opHier.doDehierarchisation(alpha_vec)
    for i in range(alpha_vec.getSize()):
        alpha_vec[i] = c * alpha_vec[i] + y
    opHier.doHierarchisation(alpha_vec)
    return alpha_vec.array()
def testFG(obj, grid, level, function):
    node_values = None
    node_values_back = None
    alpha = None
    points = None
    p = None
    l_user = 2
    # generate a regular test grid
    generator = grid.createGridGenerator()
    generator.truncated(level, l_user)

    storage = grid.getStorage()
    dim = storage.dim()

    # generate the node_values vector
    fgs = FullGridSet(dim, level, l_user)
    node_values = DataVector(storage.size())
    for i in range(fgs.getSize()):
        fg = fgs.at(i)
        m = fg.getSize()
        for j in range(m):
            points = fg.getCoordsString(j).split()
            d = evalFunction(function, points)
            fg.set(j, d)
    fgs.reCompose(storage, node_values)
    createOperationHierarchisation(grid).doHierarchisation(node_values)
    evalOp = createOperationEval(grid)
    p = DataVector(dim)
    for m in range(10):
        points = []
        for k in range(dim):
            p[k] = random.random()
            points.append(str(p[k]))
        for j in range(fgs.getSize()):
            fg = fgs.at(j)
            fg.eval(p)
        if (abs(evalOp.eval(node_values, p) - evalFunction(function, points)) >
                0.01):
            print(points)
            print(evalOp.eval(node_values, p))
            print(evalFunction(function, points))
            obj.fail()
        obj.failUnlessAlmostEqual(evalOp.eval(node_values, p),
                                  fgs.combinedResult())
Example #7
def interpolate(grid, f):
    """
    This helper function computes the coefficients of the sparse grid
    interpolant of a given function

    Arguments:
    grid -- Grid sparse grid from pysgpp
    f -- function to be interpolated

    Return DataVector coefficients of the sparse grid function
    """
    gs = grid.getStorage()
    alpha = DataVector(gs.getSize())
    p = DataVector(gs.getDimension())
    for i in range(gs.getSize()):
        gs.getCoordinates(gs.getPoint(i), p)
        alpha[i] = f(p)
    createOperationHierarchisation(grid).doHierarchisation(alpha)
    return alpha
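A hedged usage sketch: interpolate a simple 2D function and evaluate the resulting sparse grid function at an arbitrary point; createOperationEval and the DataVector list constructor are used as in the other examples, and the test function is illustrative.

from pysgpp import Grid, DataVector, createOperationEval

grid = Grid.createLinearGrid(2)
grid.getGenerator().regular(3)

# coefficients of the sparse grid interpolant of f(x) = x0 * (1 - x1)
alpha = interpolate(grid, lambda p: p[0] * (1 - p[1]))

x = DataVector([0.3, 0.7])
print(createOperationEval(grid).eval(alpha, x))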
Example #8
    def __init__(self, d, f):
        self.f = f
        self.d = d
        self.grid = pysgpp.Grid.createBsplineClenshawCurtisGrid(d, 3)
        self.gridStorage = self.grid.getStorage()
        try:
            self.hierarch = pysgpp.createOperationHierarchisation(self.grid)
        except:
            self.hierarch = pysgpp.createOperationMultipleHierarchisation(self.grid)
        self.opeval = pysgpp.createOperationEvalNaive(self.grid)
        self.alpha = pysgpp.DataVector(self.gridStorage.getSize())
Example #9
def interpolate(f, level, dim, gridType=GridType_Linear, deg=2, trans=None):
    # create a sparse grid of the requested type and dimension
    if gridType == GridType_PolyBoundary:
        grid = Grid.createPolyBoundaryGrid(dim, deg)
    elif gridType == GridType_Poly:
        grid = Grid.createPolyGrid(dim, deg)
    elif gridType == GridType_Linear:
        grid = Grid.createLinearGrid(dim)
    elif gridType == GridType_LinearBoundary:
        grid = Grid.createLinearBoundaryGrid(dim, 1)
    else:
        raise AttributeError

    gridStorage = grid.getStorage()

    # create regular grid
    grid.getGenerator().regular(level)

    # create coefficient vector
    alpha = DataVector(gridStorage.getSize())
    alpha.setAll(0.0)

    # set function values in alpha
    x = DataVector(dim)
    for i in range(gridStorage.getSize()):
        gp = gridStorage.getPoint(i)
        gridStorage.getCoordinates(gp, x)
        p = x.array()

        if trans is not None:
            p = trans.unitToProbabilistic(p)

        if gridStorage.getDimension() == 1:
            p = p[0]
        alpha[i] = f(p)

    # hierarchize
    createOperationHierarchisation(grid).doHierarchisation(alpha)

    return grid, alpha
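A usage sketch for this variant; the grid type, degree, and test function are illustrative, and createOperationEvalNaive is used for evaluation as in Example #8 above.

from pysgpp import DataVector, GridType_Poly, createOperationEvalNaive

grid, alpha = interpolate(lambda p: p[0] ** 2 + p[1], level=4, dim=2,
                          gridType=GridType_Poly, deg=3)

x = DataVector([0.25, 0.5])
print(createOperationEvalNaive(grid).eval(alpha, x))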
Example #10
def hierarchize(grid, nodalValues, isConsistent=True, ignore=None):
    try:
        # if ignore is None or len(ignore) > 0:
        maxLevel = grid.getStorage().getMaxLevel()
        if grid.getType() in [
                GridType_Bspline, GridType_BsplineClenshawCurtis,
                GridType_BsplineBoundary, GridType_ModBsplineClenshawCurtis,
                GridType_ModBspline
        ]:
            opHier = createOperationMultipleHierarchisation(grid)
        elif maxLevel > 1 and \
             grid.getType() in [GridType_LinearBoundary,
                                GridType_LinearClenshawCurtisBoundary,
                                GridType_PolyBoundary,
                                GridType_PolyClenshawCurtisBoundary]:
            opHier = createOperationArbitraryBoundaryHierarchisation(grid)
        else:
            opHier = createOperationHierarchisation(grid)

        alpha_vec = DataVector(nodalValues)
        opHier.doHierarchisation(alpha_vec)

        alpha = np.array(alpha_vec.array())

        del alpha_vec

        return alpha


#         print( "using brute force hierarchization" )
#         return hierarchizeBruteForce(grid, nodalValues, ignore)
    except Exception as e:
        print(e)
    print("something went wrong during hierarchization")
    import ipdb
    ipdb.set_trace()
    return hierarchizeBruteForce(grid, nodalValues, ignore)
Example #11
print("number of grid points: {}".format(gridStorage.getSize()))

## Calculate the surplus vector alpha for the interpolant of \f$
## f(x)\f$. Since the function can be evaluated at any point, we
## simply evaluate it at the coordinates of the grid points to obtain
## the nodal values. Then we use hierarchization to obtain the
## surplus values.

# create coefficient vector
alpha = pysgpp.DataVector(gridStorage.getSize())
for i in range(gridStorage.getSize()):
    gp = gridStorage.getPoint(i)
    p = tuple([gp.getStandardCoordinate(j) for j in range(dim)])
    alpha[i] = f(p)

pysgpp.createOperationHierarchisation(grid).doHierarchisation(alpha)

## Now we compute and compare the quadrature using four different methods available in SG++.

# direct quadrature
opQ = pysgpp.createOperationQuadrature(grid)
res = opQ.doQuadrature(alpha)
print("exact integral value:  {}".format(res))

# Monte Carlo quadrature using 100000 paths
opMC = pysgpp.OperationQuadratureMC(grid, 100000)
res = opMC.doQuadrature(alpha)
print("Monte Carlo value:     {:.6f}".format(res))
res = opMC.doQuadrature(alpha)
print("Monte Carlo value:     {:.6f}".format(res))
Example #12
print("dimensionality:                   {}".format(dim))

# create regular grid, level 3
level = 3
gridGen = grid.createGridGenerator()
gridGen.regular(level)
print("number of initial grid points:    {}".format(gridStorage.size()))

# definition of function to interpolate - nonsymmetric(!)
f = lambda x0, x1: 16.0 * (x0 - 1) * x0 * (x1 - 1) * x1 * x1
# create coefficient vector
alpha = DataVector(gridStorage.size())
print("length of alpha vector:           {}".format(alpha.getSize()))

# now refine adaptively 5 times
for refnum in range(5):
    # set function values in alpha
    for i in range(gridStorage.size()):
        gp = gridStorage.get(i)
        alpha[i] = f(gp.getCoord(0), gp.getCoord(1))

    # hierarchize
    createOperationHierarchisation(grid).doHierarchisation(alpha)

    # refine a single grid point each time
    gridGen.refine(SurplusRefinementFunctor(alpha, 1))
    print("refinement step {}, new grid size: {}".format(
        refnum + 1, gridStorage.size()))

    # extend alpha vector (new entries uninitialized)
    alpha.resize(gridStorage.size())
Example #13
def addConst(grid, alpha, c):
    opHier = createOperationHierarchisation(grid)
    opHier.doDehierarchisation(alpha)
    alpha.add(DataVector([c] * len(alpha)))
    opHier.doHierarchisation(alpha)
grid = Grid.createLinearGrid(dim)
gridStorage = grid.getStorage()
print("dimensionality:        {}".format(dim))

# create regular grid, level 3
level = 3
gridGen = grid.createGridGenerator()
gridGen.regular(level)
print("number of grid points: {}".format(gridStorage.size()))

# create coefficient vector
alpha = DataVector(gridStorage.size())
for i in range(gridStorage.size()):
    gp = gridStorage.get(i)
    alpha[i] = f((gp.getCoord(0), gp.getCoord(1)))
createOperationHierarchisation(grid).doHierarchisation(alpha)

# direct quadrature
opQ = createOperationQuadrature(grid)
res = opQ.doQuadrature(alpha)
print("exact integral value:  {}".format(res))

# Monte Carlo quadrature using 100000 paths
opMC = OperationQuadratureMC(grid, 100000)
res = opMC.doQuadrature(alpha)
print("Monte Carlo value:     {:.6f}".format(res))
res = opMC.doQuadrature(alpha)
print("Monte Carlo value:     {:.6f}".format(res))

# Monte Carlo quadrature of a function
res = opMC.doQuadratureFunc(f)
Example #15
def addConst(grid, alpha, c):
    opHier = createOperationHierarchisation(grid)
    opHier.doDehierarchisation(alpha)
    alpha.add(DataVector([c] * len(alpha)))
    opHier.doHierarchisation(alpha)
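A small usage sketch (the grid, level, and target function are illustrative): addConst dehierarchizes the coefficients, shifts every nodal value by c, and re-hierarchizes in place, so the represented function is shifted by the constant c.

from pysgpp import (Grid, DataVector, createOperationEval,
                    createOperationHierarchisation)

grid = Grid.createLinearGrid(2)
grid.getGenerator().regular(2)
gs = grid.getStorage()

# interpolate f(x) = x0 * x1
alpha = DataVector(gs.getSize())
p = DataVector(2)
for i in range(gs.getSize()):
    gs.getCoordinates(gs.getPoint(i), p)
    alpha[i] = p[0] * p[1]
createOperationHierarchisation(grid).doHierarchisation(alpha)

x = DataVector([0.5, 0.5])
before = createOperationEval(grid).eval(alpha, x)
addConst(grid, alpha, 1.0)  # modifies alpha in place
after = createOperationEval(grid).eval(alpha, x)
print(after - before)  # approximately 1.0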
Example #16
    def test_variance_opt(self):
        # parameters
        level = 4

        gridConfig = RegularGridConfiguration()
        gridConfig.type_ = GridType_Linear
        gridConfig.maxDegree_ = 2  # max(2, level + 1)
        gridConfig.boundaryLevel_ = 0
        gridConfig.dim_ = 2

        # mu = np.ones(gridConfig.dim_) * 0.5
        # cov = np.diag(np.ones(gridConfig.dim_) * 0.1 / 10.)
        # dist = MultivariateNormal(mu, cov, 0, 1)  # problems in 3d/l2
        # f = lambda x: dist.pdf(x)
        def f(x):
            return np.prod(4 * x * (1 - x))

        def f(x):
            return (np.arctan(50 * (x[0] - 0.35)) + np.pi / 2
                    + 4 * x[1]**3 + np.exp(x[0] * x[1] - 1))

        # --------------------------------------------------------------------------
        # define parameters
        paramsBuilder = ParameterBuilder()
        up = paramsBuilder.defineUncertainParameters()
        for idim in range(gridConfig.dim_):
            up.new().isCalled("x_%i" % idim).withBetaDistribution(3, 3, 0, 1)
        params = paramsBuilder.andGetResult()
        U = params.getIndependentJointDistribution()
        T = params.getJointTransformation()
        # --------------------------------------------------------------------------

        grid = pysgpp.Grid.createGrid(gridConfig)
        gs = grid.getStorage()
        grid.getGenerator().regular(level)
        nodalValues = np.ndarray(gs.getSize())

        p = DataVector(gs.getDimension())
        for i in range(gs.getSize()):
            gs.getCoordinates(gs.getPoint(i), p)
            nodalValues[i] = f(p.array())

        # --------------------------------------------------------------------------
        alpha_vec = pysgpp.DataVector(nodalValues)
        pysgpp.createOperationHierarchisation(grid).doHierarchisation(
            alpha_vec)
        alpha = alpha_vec.array()
        checkInterpolation(grid, alpha, nodalValues, epsilon=1e-13)
        # --------------------------------------------------------------------------

        quad = AnalyticEstimationStrategy()
        mean = quad.mean(grid, alpha, U, T)["value"]
        var = quad.var(grid, alpha, U, T, mean)["value"]

        if self.verbose:
            print("mean: %g" % mean)
            print("var : %g" % var)
            print("-" * 80)

        # drop arbitrary grid points and compute the mean and the variance
        # -> just use leaf nodes for simplicity
        bilinearForm = BilinearGaussQuadratureStrategy(grid.getType())
        bilinearForm.setDistributionAndTransformation(U.getDistributions(),
                                                      T.getTransformations())
        linearForm = LinearGaussQuadratureStrategy(grid.getType())
        linearForm.setDistributionAndTransformation(U.getDistributions(),
                                                    T.getTransformations())

        i = np.random.randint(0, gs.getSize())
        gpi = gs.getPoint(i)
        # --------------------------------------------------------------------------
        # check refinement criterion
        ranking = ExpectationValueOptRanking()
        mean_rank = ranking.rank(grid, gpi, alpha, params)
        if self.verbose:
            print("rank mean: %g" % (mean_rank, ))
        # --------------------------------------------------------------------------
        # check refinement criterion
        ranking = VarianceOptRanking()
        var_rank = ranking.rank(grid, gpi, alpha, params)
        if self.verbose:
            print("rank var:  %g" % (var_rank, ))
        # --------------------------------------------------------------------------
        # remove one grid point and update coefficients
        toBeRemoved = IndexList()
        toBeRemoved.push_back(i)
        ixs = gs.deletePoints(toBeRemoved)
        gpsj = []
        new_alpha = np.ndarray(gs.getSize())
        for j in range(gs.getSize()):
            new_alpha[j] = alpha[ixs[j]]
            gpsj.append(gs.getPoint(j))
        # --------------------------------------------------------------------------
        # compute the mean and the variance of the new grid
        mean_trunc = quad.mean(grid, new_alpha, U, T)["value"]
        var_trunc = quad.var(grid, new_alpha, U, T, mean_trunc)["value"]
        basis = getBasis(grid)

        # compute the covariance
        A, _ = bilinearForm.computeBilinearFormByList(gs, [gpi], basis, gpsj,
                                                      basis)
        b, _ = linearForm.computeLinearFormByList(gs, gpsj, basis)

        mean_uwi_phii = np.dot(new_alpha, A[0, :])
        mean_phii, _ = linearForm.getLinearFormEntry(gs, gpi, basis)
        mean_uwi = np.dot(new_alpha, b)
        cov_uwi_phii = mean_uwi_phii - mean_phii * mean_uwi

        # compute the variance of phi_i
        firstMoment, _ = linearForm.getLinearFormEntry(gs, gpi, basis)
        secondMoment, _ = bilinearForm.getBilinearFormEntry(
            gs, gpi, basis, gpi, basis)
        var_phii = secondMoment - firstMoment**2

        # update the ranking
        var_estimated = (var_trunc + alpha[i]**2 * var_phii
                         + 2 * alpha[i] * cov_uwi_phii)

        mean_diff = np.abs(mean_trunc - mean)
        var_diff = np.abs(var_trunc - var)

        if self.verbose:
            print("-" * 80)
            print("diff: |var - var_estimated| = %g" %
                  (np.abs(var - var_estimated), ))
            print("diff: |var - var_trunc|     = %g = %g = var opt ranking" %
                  (var_diff, var_rank))
            print("diff: |mean - mean_trunc|   = %g = %g = mean opt ranking" %
                  (mean_diff, mean_rank))

        self.assertTrue(np.abs(var - var_estimated) < 1e-14)
        self.assertTrue(np.abs(mean_diff - mean_rank) < 1e-14)
        self.assertTrue(np.abs(var_diff - var_rank) < 1e-14)
Example #17
    def test_squared(self):
        # parameters
        level = 3

        gridConfig = RegularGridConfiguration()
        gridConfig.type_ = GridType_Linear
        gridConfig.maxDegree_ = 2  # max(2, level + 1)
        gridConfig.boundaryLevel_ = 0
        gridConfig.dim_ = 2

        def f(x):
            return np.prod(8 * x * (1 - x))

        # --------------------------------------------------------------------------
        # define parameters
        paramsBuilder = ParameterBuilder()
        up = paramsBuilder.defineUncertainParameters()
        for idim in range(gridConfig.dim_):
            up.new().isCalled("x_%i" % idim).withUniformDistribution(0, 1)
        params = paramsBuilder.andGetResult()
        U = params.getIndependentJointDistribution()
        T = params.getJointTransformation()
        # --------------------------------------------------------------------------

        grid = pysgpp.Grid.createGrid(gridConfig)
        gs = grid.getStorage()
        grid.getGenerator().regular(level)
        nodalValues = np.ndarray(gs.getSize())
        weightedNodalValues = np.ndarray(gs.getSize())

        p = DataVector(gs.getDimension())
        for i in range(gs.getSize()):
            gs.getCoordinates(gs.getPoint(i), p)
            nodalValues[i] = f(p.array())**2
            weightedNodalValues[i] = f(p.array())**2 * U.pdf(
                T.unitToProbabilistic(p))

        # --------------------------------------------------------------------------
        alpha_vec = pysgpp.DataVector(nodalValues)
        pysgpp.createOperationHierarchisation(grid).doHierarchisation(
            alpha_vec)
        alpha = alpha_vec.array()
        checkInterpolation(grid, alpha, nodalValues, epsilon=1e-13)
        # --------------------------------------------------------------------------
        alpha_vec = pysgpp.DataVector(weightedNodalValues)
        pysgpp.createOperationHierarchisation(grid).doHierarchisation(
            alpha_vec)
        weightedAlpha = alpha_vec.array()
        checkInterpolation(grid,
                           weightedAlpha,
                           weightedNodalValues,
                           epsilon=1e-13)
        # --------------------------------------------------------------------------
        #         np.random.seed(1234567)

        i = np.random.randint(0, gs.getSize())
        gpi = gs.getPoint(i)

        gs.getCoordinates(gpi, p)
        print(evalSGFunction(grid, alpha, p.array()))
        print(evalSGFunctionBasedOnParents(grid, alpha, gpi))

        # --------------------------------------------------------------------------
        # check refinement criterion
        ranking = SquaredSurplusRanking()
        squared_surplus_rank = ranking.rank(grid, gpi, weightedAlpha, params)
        if self.verbose:
            print("rank squared surplus: %g" % (squared_surplus_rank, ))
        # --------------------------------------------------------------------------
        # check refinement criterion
        ranking = AnchoredMeanSquaredOptRanking()
        anchored_mean_squared_rank = ranking.rank(grid, gpi, alpha, params)
        if self.verbose:
            print("rank mean squared   : %g" % (anchored_mean_squared_rank, ))
Example #18
    def test_anchored_variance_opt(self):
        # parameters
        level = 4

        gridConfig = RegularGridConfiguration()
        gridConfig.type_ = GridType_Linear
        gridConfig.maxDegree_ = 2  # max(2, level + 1)
        gridConfig.boundaryLevel_ = 0
        gridConfig.dim_ = 2

        # mu = np.ones(gridConfig.dim_) * 0.5
        # cov = np.diag(np.ones(gridConfig.dim_) * 0.1 / 10.)
        # dist = MultivariateNormal(mu, cov, 0, 1)  # problems in 3d/l2
        # f = lambda x: dist.pdf(x)
        def f(x):
            return np.prod(4 * x * (1 - x))

        def f(x):
            return (np.arctan(50 * (x[0] - 0.35)) + np.pi / 2
                    + 4 * x[1]**3 + np.exp(x[0] * x[1] - 1))

        # --------------------------------------------------------------------------
        # define parameters
        paramsBuilder = ParameterBuilder()
        up = paramsBuilder.defineUncertainParameters()
        for idim in range(gridConfig.dim_):
            up.new().isCalled("x_%i" % idim).withBetaDistribution(3, 3, 0, 1)
        params = paramsBuilder.andGetResult()
        U = params.getIndependentJointDistribution()
        T = params.getJointTransformation()
        # --------------------------------------------------------------------------

        grid = pysgpp.Grid.createGrid(gridConfig)
        gs = grid.getStorage()
        grid.getGenerator().regular(level)
        nodalValues = np.ndarray(gs.getSize())

        p = DataVector(gs.getDimension())
        for i in range(gs.getSize()):
            gs.getCoordinates(gs.getPoint(i), p)
            nodalValues[i] = f(p.array())

        # --------------------------------------------------------------------------
        alpha_vec = pysgpp.DataVector(nodalValues)
        pysgpp.createOperationHierarchisation(grid).doHierarchisation(
            alpha_vec)
        alpha = alpha_vec.array()
        checkInterpolation(grid, alpha, nodalValues, epsilon=1e-13)
        # --------------------------------------------------------------------------
        i = np.random.randint(0, gs.getSize())
        gpi = gs.getPoint(i)
        # --------------------------------------------------------------------------
        # check refinement criterion
        ranking = AnchoredVarianceOptRanking()
        var_rank = ranking.rank(grid, gpi, alpha, params)
        if self.verbose:
            print("rank anchored var:  %g" % (var_rank, ))
        # --------------------------------------------------------------------------
        # compute the mean and the variance of the new grid
        x = DataVector(gs.getDimension())
        gs.getCoordinates(gpi, x)
        x = x.array()
        uwxi = evalSGFunction(grid, alpha, x) - alpha[i]
        fx = U.pdf(T.unitToProbabilistic(x))

        var_rank_estimated = np.abs(
            (fx - fx**2) * (-alpha[i]**2 - 2 * alpha[i] * uwxi))

        if self.verbose:
            print("rank anchored var:  %g" % (var_rank_estimated, ))

        if self.verbose:
            print("-" * 80)
            print("diff: |var - var_estimated| = %g" %
                  (np.abs(var_rank - var_rank_estimated), ))