Example #1
def example4():
    ## After wrapping our new function into a pysgpp.MultiFunction, we create a FunctionLookupTable.
    ## This will cache the function values by their DataVector parameter and use cached values if available.
    ## Note, however, that even slightly differing DataVectors will lead to separate function evaluations.
    loggingFunc = pysgpp.multiFunc(loggingF)
    lookupTable = pysgpp.FunctionLookupTable(loggingFunc)
    operation = pysgpp.CombigridOperation.createLinearLejaQuadrature(
        d, lookupTable.toMultiFunction())

    ## Do a normal computation...
    result = operation.evaluate(2)
    print("Result computed: " + str(result))

    ## The first (and most convenient) way to store the data is to serialize the lookup table.
    ## The serialization is uncompressed and takes roughly 60 bytes per entry. If you have lots
    ## of data, you might consider compressing it.
    pysgpp.writeToFile("lookupTable.log", lookupTable.serialize())
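    ## (Sketch) If compression is wanted, the serialized string could be
    ## gzipped with Python's standard library instead of using
    ## pysgpp.writeToFile directly:
    ##   import gzip
    ##   with gzip.open("lookupTable.log.gz", "wt") as fd:
    ##       fd.write(lookupTable.serialize())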

    ## It is also possible to store which levels have been evaluated:
    pysgpp.writeToFile(
        "levels.log",
        operation.getLevelManager().getSerializedLevelStructure())

    ## Restore the data into another lookup table. The function is still needed for new evaluations.
    restoredLookupTable = pysgpp.FunctionLookupTable(loggingFunc)
    restoredLookupTable.deserialize(pysgpp.readFromFile("lookupTable.log"))
    operation2 = pysgpp.CombigridOperation.createLinearLejaQuadrature(
        d, restoredLookupTable.toMultiFunction())

    ## A new evaluation with the same levels does not require new function evaluations:
    operation2.getLevelManager().addLevelsFromSerializedStructure(
        pysgpp.readFromFile("levels.log"))
    result = operation2.getResult()
    print("Result computed (2nd time): " + str(result))

    ## Another less general way of storing the data is directly serializing the storage underlying the
    ## operation. This means that retrieval is faster, but it only works if the same grid is used
    ## again.
    ## For demonstration purposes, we use loggingFunc directly this time without a lookup table:
    pysgpp.writeToFile("storage.log", operation.getStorage().serialize())
    operation3 = pysgpp.CombigridOperation.createLinearLejaQuadrature(
        d, loggingFunc)
    operation3.getStorage().deserialize(pysgpp.readFromFile("storage.log"))
    result = operation3.evaluate(2)
    print("Result computed (3rd time): " + str(result))
Example #2
def plotRegularUniformFullGrid(level):
    func = pysgpp.multiFunc(f)
    numDims = 2
    operation = pysgpp.CombigridMultiOperation.createExpUniformBoundaryLinearInterpolation(numDims, func)
    levelManager = operation.getLevelManager()
    levelManager.addRegularLevels(level - 1)

    P = levelManager.getAllGridPoints()
    plt.plot([0, 0, 1, 1, 0], [0, 1, 1, 0, 0], 'k')
    for i in range(len(P)):
        point = P[i]
        plt.plot(point[0], point[1], 'o',
                 color=(32 / 255.0, 86 / 255.0, 174 / 255.0), markersize=20)
    plt.axis('equal')
    plt.xlim(xmin=-0.1, xmax=1.1)
    plt.ylim(ymin=-0.1, ymax=1.1)
    plt.axis('off')
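A possible driver for this helper, assuming the imports and a simple test function f (the body of f is a placeholder; only its DataVector calling convention matters):

import math

import matplotlib.pyplot as plt
import pysgpp

def f(x):
    # placeholder model; x is a pysgpp.DataVector in [0, 1]^2
    return math.sin(x[0]) * math.cos(x[1])

plotRegularUniformFullGrid(3)
plt.show()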
Example #3
def plotRegularUniformSubGridGrey(levelMI):
    maxLevel = max(levelMI[0], levelMI[1])
    func = pysgpp.multiFunc(f)
    numDims = 2
    operation = pysgpp.CombigridOperation.createExpUniformBoundaryLinearInterpolation(numDims, func)
    levelManager = operation.getLevelManager()
    levelManager.addRegularLevels(maxLevel)
    fullGridEval = operation.getFullGridEval()
    P = fullGridEval.getGridPoints(levelMI)
    plt.plot([0, 0, 1, 1, 0], [0, 1, 1, 0, 0],
             color=(192 / 255.0, 192 / 255.0, 192 / 255.0))
    for i in range(len(P)):
        point = P[i]
        plt.plot(point[0], point[1], 'o',
                 color=(192 / 255.0, 192 / 255.0, 192 / 255.0), markersize=10)

    plt.axis('equal')
    plt.xlim(xmin=-0.1, xmax=1.1)
    plt.ylim(ymin=-0.1, ymax=1.1)
    plt.axis('off')
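Usage is analogous to the previous example; the subgrid is selected by a level multi-index. A hedged usage sketch for the subgrid of level (1, 2), assuming pysgpp.IndexVector is accepted as the multi-index type (as in Example #5 below):

plotRegularUniformSubGridGrey(pysgpp.IndexVector([1, 2]))
plt.show()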
Example #4
File: pce.py  Project: pengruifei/SGpp
def ct_to_pce():
    start_time = time.time()
    # initialize model function
    func = pysgpp.multiFunc(expModel)
    numDims = 2
    # regular sparse grid level q
    q = 6
    # create polynomial basis
    config = pysgpp.OrthogonalPolynomialBasis1DConfiguration()
    config.polyParameters.type_ = pysgpp.OrthogonalPolynomialBasisType_LEGENDRE
    basisFunction = pysgpp.OrthogonalPolynomialBasis1D(config)
    # create sparse grid interpolation operation
    op = pysgpp.CombigridOperation.createExpClenshawCurtisPolynomialInterpolation(
        numDims, func)
    # start with regular level q and add some levels adaptively
    op.getLevelManager().addRegularLevels(q)
    op.getLevelManager().addLevelsAdaptiveByNumLevels(5)

    ## From the sparse grid interpolant we construct a PCE representation to easily
    ## calculate statistical features of our model.
    # create polynomial chaos surrogate from sparse grid
    surrogateConfig = pysgpp.CombigridSurrogateModelConfiguration()
    surrogateConfig.type = pysgpp.CombigridSurrogateModelsType_POLYNOMIAL_CHAOS_EXPANSION
    surrogateConfig.loadFromCombigridOperation(op)
    surrogateConfig.basisFunction = basisFunction
    pce = pysgpp.createCombigridSurrogateModel(surrogateConfig)
    # compute sobol indices
    sobol_indices = pysgpp.DataVector(1)
    total_indices = pysgpp.DataVector(1)
    pce.getComponentSobolIndices(sobol_indices)
    pce.getTotalSobolIndices(total_indices)
    # print results
    print("Mean: {} Variance: {}".format(pce.mean(), pce.variance()))
    print("Sobol indices {}".format(sobol_indices.toString()))
    print("Total Sobol indices {}".format(total_indices.toString()))
    print("Sum {}\n".format(sobol_indices.sum()))

    print("Elapsed time: {} s".format(time.time() - start_time))
Example #5
import math
from functools import reduce
from operator import mul

import pysgpp

base = 0.1


## The first thing we need is a function to evaluate. This function will be evaluated on the domain
## \f$[0, 1]^d\f$. This particular function can be used with any number of dimensions.
## The input parameter of the function is of type pysgpp.DataVector, so do not treat it like a list.
## The return type is float.
def f(x):
    product = 1.0
    for i in range(x.getSize()):
        product *= math.exp(-pow(base, i) * x[i])
    return product


## We have to wrap f in a pysgpp.MultiFunction object.
func = pysgpp.multiFunc(f)


## comparison function
def compare():
    mydim = 5
    operation = pysgpp.CombigridOperation.createLinearLejaQuadrature(
        mydim, func)
    levelManager = operation.getLevelManager()
    idx = pysgpp.IndexVector([1 for i in range(mydim)])
    levelManager.addLevel(idx)

    result = operation.getResult()
    analyticalResult = reduce(mul, [
        pow(base, -i) * (1.0 - pow(math.e, -pow(base, i)))
        for i in range(mydim)
    ])
    print("Computed result:   " + str(result))
    print("Analytical result: " + str(analyticalResult))
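Each factor of the analytical reference value is the one-dimensional integral \f$\int_0^1 e^{-b^i x}\,\mathrm{d}x = b^{-i}(1 - e^{-b^i})\f$ with \f$b\f$ = base, so the product over \f$i = 0, \dots, \mathrm{mydim} - 1\f$ is exactly the separable integral that the Leja quadrature approximates.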
Example #6
                        type=int,
                        help="Leja growth factor")
    parser.add_argument('--levelManager',
                        default="variance",
                        type=str,
                        help="define level manager")
    parser.add_argument('--dist',
                        default="beta",
                        type=str,
                        help="define marginal distribution")
    args = parser.parse_args()

    model, params = buildModel(args.model, args.dist)

    # We have to wrap f in a pysgpp.MultiFunction object.
    func = pysgpp.multiFunc(lambda x: model(x, params))
    numDims = params.getStochasticDim()

    # compute reference values
    n = 10000
    x = params.getIndependentJointDistribution().rvs(n)
    y = np.array([model(xi, params) for xi in x])

    results = {}
    for gridType, levelManagerType, basisType in [
        ("ClenshawCurtis", "variance", "poly"),
        ("ClenshawCurtis", "averaging", "poly"),
        ("UniformBoundary", "averaging", "bspline"),
        ("UniformBoundary", "regular", "bspline"),
        ("L2Leja", "variance", "poly"),
            #             ("Leja", "variance", "poly"),
Example #7
def adaptiveGridToRegularGrid(numDims,
                              level,
                              refnums,
                              f,
                              numSamples=1000,
                              plot=False,
                              verbose=False):
    """
    Converts a regular sparse grid function to a sparse grid in the
    combination technique and back.

    Arguments:
    numDims -- int number of dimensions
    level -- level of the sparse grid
    refnums -- int number of refinement steps
    f -- function to be interpolated
    numSamples -- int number of random samples on which we evaluate the different sparse grid
                  functions to validate the grid conversion
    plot -- bool whether the sparse grid functions are plotted or not (just for numDims=1)
    verbose -- bool verbosity
    """
    ## We generate iid uniform samples, which we are going to use to
    ## validate the grid conversion.
    x = np.random.rand(numSamples, numDims)
    parameters = DataMatrix(x)

    ## We create a regular sparse grid as usual and...
    grid = Grid.createLinearGrid(numDims)
    grid.getGenerator().regular(level)
    alpha = interpolate(grid, f)

    ## ... refine it adaptively
    grid_adaptive = grid.clone()
    alpha_adaptive = DataVector(alpha)
    refineGrid(grid_adaptive, alpha_adaptive, f, refnums)

    ## We apply now both methods of the grid conversion on the
    ## adaptively refined grid. The first conversion considers all
    ## levels where at least one sparse grid point exists, while the
    ## second one considers just complete subspaces.
    treeStorage_all = convertHierarchicalSparseGridToCombigrid(
        grid_adaptive.getStorage(), GridConversionTypes_ALLSUBSPACES)
    treeStorage_complete = convertHierarchicalSparseGridToCombigrid(
        grid_adaptive.getStorage(), GridConversionTypes_COMPLETESUBSPACES)

    ## We initialize the CombigridOperation on a grid that spans the
    ## same function space as the original hierarchical sparse grid:
    ## hat basis on equidistant grids without boundary points.
    func = multiFunc(f)
    opt_all = CombigridMultiOperation.createExpUniformLinearInterpolation(
        numDims, func)
    opt_complete = CombigridMultiOperation.createExpUniformLinearInterpolation(
        numDims, func)

    ## The CombigridOperation expects the points at which you want to
    ## evaluate the interpolant as DataMatrix with the shape (numDims
    ## x numSamples). We, therefore, need to transpose the samples and
    ## initialize the multi operation with them. To set the level
    ## structure we initialize the level manager of the operation with
    ## the storage we have obtained after the conversion.
    parameters.transpose()
    opt_all.setParameters(parameters)
    opt_all.getLevelManager().addLevelsFromStructure(treeStorage_all)
    opt_complete.setParameters(parameters)
    opt_complete.getLevelManager().addLevelsFromStructure(treeStorage_complete)
    parameters.transpose()

    ## If you want you can examine the levels of the combination
    ## technique...
    if verbose:
        print("-" * 80)
        print("just full levels:")
        print(opt_complete.getLevelManager().getSerializedLevelStructure())
        print("-" * 80)
        print("all levels:")
        print(opt_all.getLevelManager().getSerializedLevelStructure())
        print("-" * 80)

    ## We start to transform the grids from the combination technique
    ## back to their hierarchical formulation. We, again, create a
    ## grid with a piecewise d-linear basis and initialize the grid
    ## points in its storage by the ones available in the levels of
    ## the combination technique. We do it first for the combination
    ## grids that contain just those levels where the original
    ## sparse grid had complete subspaces...
    grid_complete = Grid.createLinearGrid(numDims)
    treeStorage_complete = opt_complete.getLevelManager().getLevelStructure()
    convertCombigridToHierarchicalSparseGrid(treeStorage_complete,
                                             grid_complete.getStorage())

    ## ... and do the same for the version where we considered all
    ## subspaces where at least one grid point was located.
    grid_all = Grid.createLinearGrid(numDims)
    treeStorage_all = opt_all.getLevelManager().getLevelStructure()
    convertCombigridToHierarchicalSparseGrid(treeStorage_all,
                                             grid_all.getStorage())

    ## we interpolate now f on the new grids and...
    alpha_complete = interpolate(grid_complete, f)
    alpha_all = interpolate(grid_all, f)

    ## ... evaluate all the surrogate functions we have so far
    y_sg_regular = DataVector(numSamples)
    createOperationMultipleEval(grid, parameters).eval(alpha, y_sg_regular)

    y_sg_adaptive = DataVector(numSamples)
    createOperationMultipleEval(grid_adaptive,
                                parameters).eval(alpha_adaptive, y_sg_adaptive)

    y_sg_all = DataVector(numSamples)
    createOperationMultipleEval(grid_all, parameters).eval(alpha_all, y_sg_all)

    y_sg_complete = DataVector(numSamples)
    createOperationMultipleEval(grid_complete,
                                parameters).eval(alpha_complete, y_sg_complete)

    y_ct_all = opt_all.getResult()
    y_ct_complete = opt_complete.getResult()

    ## For convenience we use flattened numpy arrays to test if the
    ## function values at each point are the same.
    y_sg_regular = y_sg_regular.array().flatten()
    y_sg_adaptive = y_sg_adaptive.array().flatten()
    y_ct_all = y_ct_all.array().flatten()
    y_ct_complete = y_ct_complete.array().flatten()
    y_sg_all = y_sg_all.array().flatten()
    y_sg_complete = y_sg_complete.array().flatten()

    ## If you want, you can plot the results if the problem is one dimensional
    if plot and numDims == 1:
        x = x.flatten()
        ixs = np.argsort(x)
        plt.figure()
        plt.plot(x[ixs], y_sg_regular[ixs], label="sg regular")
        plt.plot(x[ixs], y_sg_adaptive[ixs], label="sg adaptive")
        plt.plot(x[ixs], y_ct_complete[ixs], label="ct full")
        plt.plot(x[ixs], y_ct_all[ixs], label="ct all")
        plt.plot(x[ixs], y_sg_complete[ixs], label="sg full")
        plt.plot(x[ixs], y_sg_all[ixs], label="sg all")
        plt.legend()
        plt.show()

    ## The function values should differ if...
    if grid_complete.getSize() < grid_all.getSize():
        assert np.sum((y_ct_complete - y_ct_all)**2) > 1e-14
        assert np.sum((y_sg_regular - y_sg_all)**2) > 1e-14

    ## and should be equal if...
    if grid_complete.getSize() == grid.getSize():
        assert np.sum((y_ct_complete - y_sg_regular)**2) < 1e-14
        assert np.sum((y_sg_complete - y_sg_regular)**2) < 1e-14

    ## For the grid sizes it must hold that
    assert grid_adaptive.getSize() > grid.getSize()
    assert grid_complete.getSize() <= grid_adaptive.getSize()
    assert grid_all.getSize() >= grid.getSize()
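A minimal driver for the conversion test above, assuming a simple product parabola as the interpolated function (name and body are illustrative only):

import numpy as np

def parabola(x):
    # x is a pysgpp.DataVector with entries in [0, 1]
    return np.prod([4.0 * x[i] * (1.0 - x[i]) for i in range(x.getSize())])

adaptiveGridToRegularGrid(numDims=2, level=3, refnums=2,
                          f=parabola, numSamples=1000, verbose=True)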
                        type=int,
                        help="minimum level of regular grids")
    parser.add_argument('--maxLevel',
                        default=8,
                        type=int,
                        help="maximum level of regular grids")
    parser.add_argument('--dist',
                        default="beta",
                        type=str,
                        help="define marginal distribution")
    args = parser.parse_args()

    #model, params = buildModel(args.model, args.dist)
    model = arctanModel

    func = pysgpp.multiFunc(lambda x: model(x))
    numDims = 2  # params.getStochasticDim()

    # compute reference values
    mean = dblquad(lambda x, y: model([x, y]),
                   0,
                   1,
                   lambda x: 0,
                   lambda x: 1,
                   epsabs=1e-14)
    meanSquare = dblquad(lambda x, y: model([x, y])**2,
                         0,
                         1,
                         lambda x: 0,
                         lambda x: 1,
                         epsabs=1e-14)