Example #1
def evaluate(X_tr, y_tr, X_te, y_te, interactions=None):
    grid = sg.RegularGridConfiguration()
    grid.dim_ = 64
    grid.level_ = 2
    grid.type_ = sg.GridType_ModLinear

    adapt = sg.AdaptivityConfiguration()
    adapt.numRefinements_ = 0
    adapt.noPoints_ = 0

    solv = sg.SLESolverConfiguration()
    solv.maxIterations_ = 50
    solv.eps_ = 10e-6
    solv.threshold_ = 10e-6
    solv.type_ = sg.SLESolverType_CG

    # the final solver needs its own configuration object: plain assignment
    # would only alias solv, and the SWIG attribute is maxIterations_
    final_solv = sg.SLESolverConfiguration()
    final_solv.maxIterations_ = 200
    final_solv.eps_ = solv.eps_
    final_solv.threshold_ = solv.threshold_
    final_solv.type_ = solv.type_

    regular = sg.RegularizationConfiguration()
    regular.type_ = sg.RegularizationType_Identity
    regular.exponentBase_ = 1.0
    regular.lambda_ = 0.1    

    X_tr = sg.DataMatrix(X_tr)
    y_tr = sg.DataVector(y_tr)
    X_te = sg.DataMatrix(X_te)
    y_te = sg.DataVector(y_te)
    
    if interactions is None:
        estimator = sg.ClassificationLearner(grid, adapt, solv, final_solv, regular)
    else:
        estimator = sg.ClassificationLearner(grid, adapt, solv, final_solv, regular, interactions)
    estimator.train(X_tr, y_tr)
    return estimator.getAccuracy(X_te, y_te)
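A minimal usage sketch (not part of the original example): evaluate hard-codes grid.dim_ = 64, which matches the 64 pixel features of scikit-learn's digits dataset; the binary subset and the {-1, +1} label encoding below are assumptions.

# Hedged usage sketch: dataset choice and label encoding are assumptions.
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split

digits = load_digits(n_class=2)               # 64 features match grid.dim_ = 64
X = digits.data / 16.0                        # scale pixel values into [0, 1]
y = digits.target.astype(float) * 2.0 - 1.0   # assumed {-1, +1} label encoding
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=42)
print("accuracy:", evaluate(X_tr, y_tr, X_te, y_te))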
Example #2
def test_laplace(grid, lmax):
    resolution = 100000
    grid.getGenerator().regular(lmax)
    gridStorage = grid.getStorage()
    size = gridStorage.getSize()
    b = getBasis(grid)
    op = pysgpp.createOperationLaplace(grid)
    alpha = pysgpp.DataVector(size)
    result = pysgpp.DataVector(size)

    for point_i in range(size):
        for point_j in range(size):
            gp_i = gridStorage.getPoint(point_i)
            gp_j = gridStorage.getPoint(point_j)
            print("--------")
            # apply the Laplace operator to the point_i-th unit vector;
            # result[point_j] is then the stiffness-matrix entry (point_i, point_j)
            for i in range(size):
                alpha[i] = 0
            alpha[point_i] = 1
            op.mult(alpha, result)
            xs = np.linspace(0, 1, resolution)
            approx = sum([
                b.evalDx(gp_i.getLevel(0), gp_i.getIndex(0), x) *
                b.evalDx(gp_j.getLevel(0), gp_j.getIndex(0), x) for x in xs
            ]) / resolution
            print("i,j: {},{} result: {} approx:{}".format(
                point_i, point_j, result[point_j], approx))
            if abs(result.get(point_j) - approx) > 1e-1:
                print("--------")
                print("points: {},{} ".format(point_i, point_j))
                print("approx:{}".format(approx))
                print("result:{}".format(result.get(point_j)))
                # print result
                print("--------")
Example #3
def getNextTodoPoints(maxPoints, precalculatedValues, dim, gridType, degree,
                      numTimeSteps, gridResolution, normalization, residual,
                      wave_type, distribution, minimum_allowed_height):
    okushiriFunc = okushiri(dim, numTimeSteps, gridResolution, normalization,
                            residual, wave_type, distribution,
                            minimum_allowed_height)
    lb, ub = okushiriFunc.getDomain()
    objFunc = vectorObjFuncSGpp(okushiriFunc)
    pdfs = objFunc.getDistributions()
    reSurf = pysgpp.SplineResponseSurfaceVector(
        objFunc, pysgpp.DataVector(lb), pysgpp.DataVector(ub),
        pysgpp.Grid.stringToGridType(gridType), degree)
    # initialLevel, numRefine and verbose are module-level settings in the original script
    reSurf.regular(initialLevel)
    todoPointsDetermined = False
    counter = 0

    while not todoPointsDetermined:
        previousSize = reSurf.getSize()
        if previousSize > maxPoints:
            print(
                f"nothing to calculate for a maximum of {maxPoints} grid points"
            )
            return todoPointsDetermined, [], reSurf.getSize()

        reSurf.nextSurplusAdaptiveGrid(numRefine, verbose)
        #reSurf.nextDistributionAdaptiveGrid(numRefine, pdfs, verbose)
        todoPointsDetermined, todoPoints = checkPrecalc(
            reSurf, precalculatedValues)
        if not todoPointsDetermined:
            counter = counter + 1
            print(f"refining ({counter}), grid size: {reSurf.getSize()}")
            reSurf.refineSurplusAdaptive(numRefine, verbose)
            #reSurf.refineDistributionAdaptive(numRefine, pdfs, verbose)
    return todoPointsDetermined, todoPoints, reSurf.getSize()
Example #4
def evaluate_one(estimator, X, y, train, test):
    train_X = sg.DataMatrix(X[train])
    train_y = sg.DataVector(y[train])
    test_X = sg.DataMatrix(X[test])
    test_y = sg.DataVector(y[test])
    estimator.train(train_X, train_y)
    error = estimator.getMSE(test_X, test_y)
    return error
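A hedged cross-validation sketch (not in the original source): evaluate_one expects index-based splits like those produced by scikit-learn's KFold; estimator stands for any learner exposing train() and getMSE(), e.g. an sg.RegressionLearner configured analogously to Example #1.

# Illustrative only: `estimator`, `X` and `y` must be set up beforehand.
import numpy as np
from sklearn.model_selection import KFold

kf = KFold(n_splits=5, shuffle=True, random_state=0)
errors = [evaluate_one(estimator, X, y, train, test)
          for train, test in kf.split(X)]
print("mean CV MSE:", np.mean(errors))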
Example #5
 def getDomain(self):
     lb = pysgpp.DataVector(self.dim)
     ub = pysgpp.DataVector(self.dim)
     for d in range(self.dim):
         bounds = self.pdfs.get(d).getBounds()
         lb[d] = bounds[0]
         ub[d] = bounds[1]
     return lb, ub
Example #6
def generate_friedman1(seed):
    (X, y) = data.make_friedman1(n_samples=2000, random_state=seed, noise=1.0)

    # transform values to DataMatrix/DataVector types
    X = sg.DataMatrix(X)
    y = sg.DataVector(y)

    return (X, y)
Example #7
 def __init__(self, d, f):
   self.f = f
   self.d = d
   self.grid = pysgpp.Grid.createBsplineClenshawCurtisGrid(d, 3)
   self.gridStorage = self.grid.getStorage()
   try:
     self.hierarch = pysgpp.createOperationHierarchisation(self.grid)
   except Exception:
     # fall back for grid types that only provide multiple hierarchisation
     self.hierarch = pysgpp.createOperationMultipleHierarchisation(self.grid)
   self.opeval = pysgpp.createOperationEvalNaive(self.grid)
   self.alpha = pysgpp.DataVector(self.gridStorage.getSize())
Example #8
 def create_interpolation(self, grid_lvl):
   self.gridStorage.clear()
   self.grid.getGenerator().regular(grid_lvl)
   self.alpha = pysgpp.DataVector(self.gridStorage.getSize())
   self.min_f = float('inf')
   for i in range(self.gridStorage.getSize()):
     gp = self.gridStorage.getPoint(i)
     x = [self.gridStorage.getCoordinate(gp, j) for j in range(self.d)]
     self.alpha[i] = self.f(x)
     if self.alpha[i] < self.min_f:
         self.min_f = self.alpha[i]
         self.min_x = x
   self.hierarch.doHierarchisation(self.alpha)
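Examples #7, #8 and #28 (below) apparently belong to one interpolation class; a hedged sketch of how they combine, with the class name SparseGridInterpolant invented for illustration:

# Hypothetical assembly of the methods from Examples #7, #8 and #28.
f = lambda x: (x[0] - 0.3) ** 2 + (x[1] - 0.6) ** 2
interp = SparseGridInterpolant(2, f)   # __init__ from Example #7
interp.create_interpolation(4)         # hierarchization from Example #8
print(interp([0.3, 0.6]))              # evaluation via __call__ (Example #28)
print("smallest nodal value found at", interp.min_x)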
Example #9
    def eval(self, x):
        # map x from [0,1]^D to real parameter space reallb, realub
        transX = pysgpp.DataVector(self.dim)
        for d in range(self.dim):
            transX[d] = self.reallb[d] + (self.realub[d] -
                                          self.reallb[d]) * x[d]
        # print(f"{x.toString()}")
        # print(f"{transX.toString()}\n")

        y = self.okushiriStorage.eval(transX)
        maxRunUp = np.max(y)
        # the arrival time of the maximum, float(np.argmax(y)), is not used here
        return maxRunUp
Example #10
def test_LTwoDotImplicit(grid, l):
    grid.getGenerator().regular(l)
    gridStorage = grid.getStorage()
    size = gridStorage.getSize()
    # print(size)
    m = pysgpp.DataMatrix(size, size)
    opExplicit = pysgpp.createOperationLTwoDotExplicit(m, grid)
    op = pysgpp.createOperationLTwoDotProduct(grid)
    alpha = pysgpp.DataVector(size)
    resultExplicit = pysgpp.DataVector(size)
    result = pysgpp.DataVector(size)
    for i in range(size):
        alpha[i] = 1
    opExplicit.mult(alpha, resultExplicit)
    op.mult(alpha, result)
    for i in range(size):
        if result[i] != resultExplicit[i]:
            print("Error result entry {} differs".format(i))

        if abs(result[i] - resultExplicit[i]) > 1e-16:
            print("result:{}".format(result[i]))
            print("resultExplicit:{}".format(resultExplicit[i]))
Example #11
def ct_to_pce():
    start_time = time.time()
    # initialize model function
    func = pysgpp.multiFunc(expModel)
    numDims = 2
    # regular sparse grid level q
    q = 6
    # create polynomial basis
    config = pysgpp.OrthogonalPolynomialBasis1DConfiguration()
    config.polyParameters.type_ = pysgpp.OrthogonalPolynomialBasisType_LEGENDRE
    basisFunction = pysgpp.OrthogonalPolynomialBasis1D(config)
    # create sparse grid interpolation operation
    op = pysgpp.CombigridOperation.createExpClenshawCurtisPolynomialInterpolation(
        numDims, func)
    # start with regular level q and add some levels adaptively
    op.getLevelManager().addRegularLevels(q)
    op.getLevelManager().addLevelsAdaptiveByNumLevels(5)

    ## Construct a PCE representation to easily calculate statistical features of our model.
    # create polynomial chaos surrogate from sparse grid
    surrogateConfig = pysgpp.CombigridSurrogateModelConfiguration()
    surrogateConfig.type = pysgpp.CombigridSurrogateModelsType_POLYNOMIAL_CHAOS_EXPANSION
    surrogateConfig.loadFromCombigridOperation(op)
    surrogateConfig.basisFunction = basisFunction
    pce = pysgpp.createCombigridSurrogateModel(surrogateConfig)
    # compute sobol indices
    sobol_indices = pysgpp.DataVector(1)
    total_indices = pysgpp.DataVector(1)
    pce.getComponentSobolIndices(sobol_indices)
    pce.getTotalSobolIndices(total_indices)
    # print results
    print("Mean: {} Variance: {}".format(pce.mean(), pce.variance()))
    print("Sobol indices {}".format(sobol_indices.toString()))
    print("Total Sobol indices {}".format(total_indices.toString()))
    print("Sum {}\n".format(sobol_indices.sum()))

    print("Elapsed time: {} s".format(time.time() - start_time))
Example #12
def example3():
    ## Use Leja points unlike example 2 and use CombigridMultiOperation for evaluation at multiple
    ## points.
    operation = pysgpp.CombigridMultiOperation.createLinearLejaPolynomialInterpolation(
        d, func)

    ## We slightly deviate from the C++ example here and pass the interpolation points via a DataMatrix.
    ## We will use 2 interpolation points.
    ## IMPORTANT: For python, the parameters matrix needs to be transposed
    firstParam = [0.2, 0.6, 0.7]
    secondParam = [0.3, 0.9, 1.0]

    params = np.array([firstParam, secondParam])
    parameters = pysgpp.DataMatrix(params.transpose())
    print(parameters)

    ## Let's use the simple interface for this example and stop the time:
    stopwatch = pysgpp.Stopwatch()
    result = operation.evaluate(3, parameters)
    stopwatch.log()
    print("First result: " + str(result[0]) + ", function value: " +
          str(func(pysgpp.DataVector(firstParam))))
    print("Second result: " + str(result[1]) + ", function value: " +
          str(func(pysgpp.DataVector(secondParam))))
Example #13
def example5():
    ## First, we want to configure which grid points to use in which dimension.
    ## We use Chebyshev points in the 0th dimension. To make them nested, we have to use at least \f$n
    ## = 3^l\f$ points at level \f$l\f$. This is why this method contains the prefix exp.
    ## CombiHierarchies provides some matching configurations for grid points. If you nevertheless
    ## need your own configuration or you want to know which growth strategy and ordering fit to which
    ## point distribution, look up the implementation details in CombiHierarchies, it is not
    ## difficult to implement your own configuration.
    grids = pysgpp.AbstractPointHierarchyVector()
    grids.push_back(pysgpp.CombiHierarchies.expChebyshev())

    ## Our next set of grid points are Leja points with linear growth (\f$n = 1 + 3l\f$).
    ## For the last dimension, we use equidistant points with boundary. These are suited for linear
    ## interpolation. To make them nested, again the slowest possible exponential growth is selected
    ## by the CombiHierarchies class.
    grids.push_back(pysgpp.CombiHierarchies.linearLeja(3))
    grids.push_back(pysgpp.CombiHierarchies.expUniformBoundary())

    ## The next thing we have to configure is the linear operation that is performed in those
    ## directions. We will use polynomial interpolation in the 0th dimension, quadrature in the 1st
    ## dimension and linear interpolation in the 2nd dimension.
    ## Roughly speaking, this means that a quadrature is performed on the 1D function that is the
    ## interpolated function with two fixed parameters. But since those operators "commute", the
    ## result is invariant under the order that the operations are applied in.
    ## The CombiEvaluators class also provides analogous methods and typedefs for the multi-evaluation
    ## case.
    evaluators = pysgpp.FloatScalarAbstractLinearEvaluatorVector()
    evaluators.push_back(pysgpp.CombiEvaluators.polynomialInterpolation())
    evaluators.push_back(pysgpp.CombiEvaluators.quadrature())
    evaluators.push_back(pysgpp.CombiEvaluators.linearInterpolation())

    ## To create a CombigridOperation object with our own configuration, we have to provide a
    ## LevelManager as well:
    levelManager = pysgpp.WeightedRatioLevelManager()
    operation = pysgpp.CombigridOperation(grids, evaluators, levelManager,
                                          func)

    ## The two interpolations need a parameter \f$(x, z)\f$. If \f$\tilde{f}\f$ is the interpolated
    ## function, the operation approximates the result of \f$\int_0^1 \tilde{f}(x, y, z) \,dy\f$.
    parameters = pysgpp.DataVector([0.777, 0.14159])
    result = operation.evaluate(2, parameters)
    print("Result: " + str(result))
Example #14
def plotFunction(opEval, surpluses, X):
    if not doPlot: return

    # generate a meshgrid for plotting
    xx0 = np.linspace(0, 1, 65)
    xx1 = np.linspace(0, 1, 65)
    XX0, XX1 = np.meshgrid(xx0, xx1)
    XX = pysgpp.DataMatrix(np.column_stack([XX0.flatten(), XX1.flatten()]))

    # evaluate interpolant at meshgrid
    YY = pysgpp.DataVector(0)
    opEval.multiEval(surpluses, XX, YY)

    # convert resulting sgpp::base::DataVector to NumPy array
    YY = np.reshape(np.array([YY[k] for k in range(YY.getSize())]), XX0.shape)

    # actual plotting
    fig = plt.figure(figsize=(6, 6))
    ax = fig.add_subplot(projection="3d")
    ax.plot_surface(XX0, XX1, YY)
    ax.plot(X[:, 0], X[:, 1], "k.", zs=f(X[:, 0], X[:, 1]), ms=10)
Example #15
def getPercentiles(reSurf, percentages, numSamples, distribution='normal'):
    # create a large set of samples
    dim = reSurf.getNumDim()
    numTimeSteps = reSurf.getNumRes()
    if distribution == 'normal':
        mean = 1.0
        sd = 0.125
        lower = 0.5
        upper = 1.5
        rng = truncnorm((lower - mean) / sd, (upper - mean) / sd, loc=mean, scale=sd)
        sampleSet = rng.rvs((numSamples, dim))
    elif distribution == 'uniform':
        lb = 0.5
        ub = 1.5
        unitpoints = np.random.rand(numSamples, dim)
        sampleSet = np.zeros((numSamples, dim))
        for i, point in enumerate(unitpoints):
            for d in range(dim):
                sampleSet[i, d] = lb + (ub-lb)*point[d]

    point = pysgpp.DataVector(dim)
    results = np.zeros((numSamples, numTimeSteps))
    for i in range(numSamples):
        for d in range(dim):
            point[d] = sampleSet[i, d]
        res = reSurf.eval(point)
        for n in range(numTimeSteps):
            results[i, n] = res[n]
    print(f'max of all {numSamples} percentile samples: {np.max(results)*400:.4f}m')
    # calculate the percentiles from the set of samples
    # (Also Monte Carlo based mean to see if Stochastic Collocation
    # gives a significant increase in accuracy over simple MC.)
    percentiles = np.zeros((len(percentages), numTimeSteps))
    mcMeans = np.zeros(numTimeSteps)
    for n in range(numTimeSteps):
        mcMeans[n] = np.mean(results[:, n])
        for p in range(len(percentages)):
            percentiles[p, n] = np.percentile(results[:, n], percentages[p])
    return percentiles, mcMeans
Example #16
def test_firstMoment(grid, lmax):
    grid.getGenerator().regular(lmax)
    resolution = 100000
    gridStorage = grid.getStorage()
    b = grid.getBasis()
    op = pysgpp.createOperationFirstMoment(grid)
    alpha = pysgpp.DataVector(grid.getSize(), 1.0)
    bounds = pysgpp.DataMatrix(1, 2, 0.0)
    bounds.set(0, 1, 1.0)
    res = 0.0
    for i in range(grid.getSize()):
        lev = gridStorage.getPoint(i).getLevel(0)
        ind = gridStorage.getPoint(i).getIndex(0)
        temp_res = 0.0
        for c in range(resolution):
            x = float(c) / resolution
            temp_res += x * b.eval(lev, ind, x)
        res += alpha.get(i) * temp_res / resolution
    print("--FirstMoment--")
    print(res)
    print(op.doQuadrature(alpha, bounds))
    print(res - op.doQuadrature(alpha, bounds))
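In other words, test_firstMoment compares a left Riemann sum with \f$R = 10^5\f$ nodes against the quadrature operation:

\f$ \int_0^1 x\,\tilde{f}(x)\,\mathrm{d}x = \sum_i \alpha_i \int_0^1 x\,\varphi_{l_i,j_i}(x)\,\mathrm{d}x \approx \sum_i \alpha_i\,\frac{1}{R} \sum_{c=0}^{R-1} \frac{c}{R}\,\varphi_{l_i,j_i}\!\Big(\frac{c}{R}\Big) \f$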
Example #17
def example6():

    ## To create a CombigridOperation, we currently have to use the longer way as in example 5.
    grids = pysgpp.AbstractPointHierarchyVector(
        d, pysgpp.CombiHierarchies.expUniformBoundary())
    evaluators = pysgpp.FloatScalarAbstractLinearEvaluatorVector(
        d, pysgpp.CombiEvaluators.cubicSplineInterpolation())
    levelManager = pysgpp.WeightedRatioLevelManager()

    ## We have to specify whether the function always produces the same value for the same grid
    ## points. If it does, the storage can be smaller when the grid points are nested. That is the
    ## case in this implementation; however, it would be false in the PDE case, so we set it to
    ## false here to mimic that setting.
    exploitNesting = False

    ## Now create an operation as usual and evaluate the interpolation with a test parameter.
    operation = pysgpp.CombigridOperation(grids, evaluators, levelManager,
                                          pysgpp.gridFunc(gf), exploitNesting)

    parameter = pysgpp.DataVector([0.1, 0.2, 0.3])

    result = operation.evaluate(4, parameter)

    print("Target function value: " + str(func(parameter)))
    print("Numerical result: " + str(result))
Example #18
def example8(dist_type="uniform"):
    operation = pysgpp.CombigridOperation.createExpClenshawCurtisPolynomialInterpolation(
        d, func)

    config = pysgpp.OrthogonalPolynomialBasis1DConfiguration()

    if dist_type == "beta":
        config.polyParameters.type_ = pysgpp.OrthogonalPolynomialBasisType_JACOBI
        config.polyParameters.alpha_ = 5
        config.polyParameters.beta_ = 4

        U = J(
            [Beta(config.polyParameters.alpha_, config.polyParameters.beta_)] *
            d)
    else:
        config.polyParameters.type_ = pysgpp.OrthogonalPolynomialBasisType_LEGENDRE
        U = J([Uniform(0, 1)] * d)

    basisFunction = pysgpp.OrthogonalPolynomialBasis1D(config)
    basisFunctions = pysgpp.OrthogonalPolynomialBasis1DVector(d, basisFunction)

    q = 3
    operation.getLevelManager().addRegularLevels(q)
    print("Total function evaluations: %i" % operation.numGridPoints())
    ## compute variance of the interpolant

    surrogateConfig = pysgpp.CombigridSurrogateModelConfiguration()
    surrogateConfig.type = pysgpp.CombigridSurrogateModelsType_POLYNOMIAL_CHAOS_EXPANSION
    surrogateConfig.loadFromCombigridOperation(operation)
    surrogateConfig.basisFunction = basisFunction
    pce = pysgpp.createCombigridSurrogateModel(surrogateConfig)

    n = 10000
    values = [g(pysgpp.DataVector(xi)) for xi in U.rvs(n)]
    print("E(u)   = %g ~ %g" % (np.mean(values), pce.mean()))
    print("Var(u) = %g ~ %g" % (np.var(values), pce.variance()))
Example #19

class objFuncSGpp(pysgpp.ScalarFunction):
    def __init__(self, dim):
        super(objFuncSGpp, self).__init__(dim)

    def eval(self, x):
        # input x: pysgpp.DataVector
        return x[0] * x[1] + x[1]


# create an instance of the objective
dim = 2
objFunc = objFuncSGpp(dim)
# set the objective's domain
lb = pysgpp.DataVector([0, 0])  # domain has lower bounds 0
ub = pysgpp.DataVector([1, 1])  # domain has upper bounds 1

# set up response surface
degree = 3
gridType = 'nakBsplineBoundary'

# create a response surface object
reSurf = pysgpp.SplineResponseSurface(objFunc, lb, ub,
                                      pysgpp.Grid.stringToGridType(gridType),
                                      degree)

# create surrogate with regular sparse grid
# reSurf.regular(2)

# create surrogate with spatially adaptive sparse grid
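The snippet breaks off here; a hedged sketch of the adaptive branch, assuming SplineResponseSurface offers a surplusAdaptive method analogous to the vector-valued version used in Example #3 (name and signature may differ between SG++ versions):

# Assumed API: surplusAdaptive(maxNumGridPoints, initialLevel, refinementNumPoints).
reSurf.surplusAdaptive(50, 1, 3)
print("interpolant at (0.3, 0.7):", reSurf.eval(pysgpp.DataVector([0.3, 0.7])))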
Example #20
# accuracy of the extension principle
numberOfAlphaSegments = 100

## We use regular sparse grids for the sparse grid surrogates.
print("Constructing the sparse grids...")

gridBSpline = pysgpp.Grid.createBsplineBoundaryGrid(d, p, b)
gridBSpline.getGenerator().regular(n)

gridLinear = pysgpp.Grid.createBsplineBoundaryGrid(d, 1, b)
gridLinear.getGenerator().regular(n)

N = gridBSpline.getSize()
gridStorage = gridBSpline.getStorage()

functionValues = pysgpp.DataVector(N)
x = pysgpp.DataVector(d)

for k in range(N):
    gridStorage.getPoint(k).getStandardCoordinates(x)
    functionValues[k] = f.eval(x)

## For the hierarchization for the B-spline surrogate, we solve the corresponding
## system of linear equations and create the interpolant and its gradient.
print("Hierarchizing (B-spline coefficients)...")

surplusesBSpline = pysgpp.DataVector(N)
hierSLEBSpline = pysgpp.HierarchisationSLE(gridBSpline)
sleSolverBSpline = pysgpp.AutoSLESolver()

if not sleSolverBSpline.solve(hierSLEBSpline, functionValues,
                              surplusesBSpline):
    print("Solving failed, exiting.")
    sys.exit(1)
Example #21
 def eval_op_transpose(x, op, size):
     result_vec = sg.DataVector(size)
     x = sg.DataVector(np.array(x).flatten())
     op.multTranspose(x, result_vec)
     return result_vec.array().copy()
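An illustrative application of such a helper (not from the original source): feeding the canonical unit vectors through eval_op_transpose recovers the dense matrix of the transposed operator column by column; op, size (the grid size) and num_data (the length of op's input vectors) are assumed to exist in the surrounding code.

# Hedged sketch: num_data, op and size must come from the surrounding code.
import numpy as np
Bt = np.column_stack([eval_op_transpose(np.eye(num_data)[i], op, size)
                      for i in range(num_data)])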
Example #22
    print("--------------------------------------------------------------------------------------")
    

dim = 2
radius = 0.1 
degree = 2
grid = pysgpp.Grid.createWEBsplineGrid(dim, degree)

gridStorage = grid.getStorage()
print("dimensionality:         {}".format(gridStorage.getDimension()))

level = 3
grid.getGenerator().regular(level)
print("number of grid points:  {}".format(gridStorage.getSize()))

alpha = pysgpp.DataVector(gridStorage.getSize(), 0.0)
beta = pysgpp.DataVector(gridStorage.getSize(), 0.0)
print("length of alpha vector: {}".format(len(alpha)))
print("length of beta vector:  {}".format(len(beta)))

printLine()
for i in range(gridStorage.getSize()):
  gp = gridStorage.getPoint(i)
  alpha[i] = gp.getStandardCoordinate(0)
  beta[i] = gp.getStandardCoordinate(1)

#print("alpha: {}".format(alpha))
#print("beta: {}".format(beta))

x = np.zeros((len(alpha), dim))
eval_circle = np.zeros(len(alpha))
Example #23
print("dimensionality:        {}".format(dim))

# create regular grid, level 3
level = 3
gridGen = grid.getGenerator()
gridGen.regular(level)
print("number of grid points: {}".format(gridStorage.getSize()))

## Calculate the surplus vector alpha for the interpolant of \f$
## f(x)\f$. Since the function can be evaluated at any point, we
## simply evaluate it at the coordinates of the grid points to obtain
## the nodal values. Then we use hierarchization to obtain the surplus
## values.

# create coefficient vector
alpha = pysgpp.DataVector(gridStorage.getSize())
for i in range(gridStorage.getSize()):
    gp = gridStorage.getPoint(i)
    p = tuple([gp.getStandardCoordinate(j) for j in range(dim)])
    alpha[i] = f(p)

pysgpp.createOperationHierarchisation(grid).doHierarchisation(alpha)

## Now we compute and compare the quadrature using four different methods available in SG++.

# direct quadrature
opQ = pysgpp.createOperationQuadrature(grid)
res = opQ.doQuadrature(alpha)
print("exact integral value:  {}".format(res))

# Monte Carlo quadrature using 100000 paths
Example #24
def main():
    # Generate data
    print("generate dataset... ", end=' ')
    data_tr,_ = generate_friedman1(123456)
    print("Done")
    print("generated a friedman1 dataset (10D) with 2000 samples")
    
    # Config grid
    print("create grid config... ", end=' ')
    grid = sg.RegularGridConfiguration()
    grid.dim_ = 10
    grid.level_ = 3
    grid.type_ = sg.GridType_Linear
    print("Done")

    # Config adaptivity
    print("create adaptive refinement config... ", end=' ')
    adapt = sg.AdaptivityConfiguration()
    adapt.numRefinements_ = 0
    adapt.noPoints_ = 10
    print("Done")
    
    # Config solver
    print("create solver config... ", end=' ')
    solv = sg.SLESolverConfiguration()
    solv.maxIterations_ = 1000
    solv.eps_ = 1e-14
    solv.threshold_ = 1e-14
    solv.type_ = sg.SLESolverType_CG
    print("Done")

    # Config regularization
    print("create regularization config... ", end=' ')
    regular = sg.RegularizationConfiguration()
    regular.regType_ = sg.RegularizationType_Laplace  
    print("Done")

    # Config cross validation for learner
    print("create learner config... ", end=' ')
    crossValid = sg.CrossvalidationConfiguration()
    crossValid.enable_ = False
    crossValid.kfold_ = 3
    crossValid.lambda_ = 3.16228e-06
    crossValid.lambdaStart_ = 1e-1
    crossValid.lambdaEnd_ = 1e-10
    crossValid.lambdaSteps_ = 3
    crossValid.logScale_ = True
    crossValid.shuffle_ = True
    crossValid.seed_ = 1234567
    crossValid.silent_ = False
    print("Done")

    #
    # Create the learner with the given configuration
    #
    print("create the learner... ")
    learner = sg.LearnerSGDE(grid, adapt, solv, regular, crossValid)
    learner.initialize(data_tr)
    
    # Train the learner
    print("start training... ")
    learner.train()
    print("done training")
    
    #
    # Estimate the probability density function (pdf) via a Gaussian kernel 
    # density estimation (KDE) and print the corresponding values
    #
    kde = sg.KernelDensityEstimator(data_tr)
    x = sg.DataVector(learner.getDim())
    x.setAll(0.5)
    
    print("-----------------------------------------------")
    print(learner.getSurpluses().getSize(), " -> ", learner.getSurpluses().sum())
    print("pdf_SGDE(x) = ", learner.pdf(x), " ~ ", kde.pdf(x), " = pdf_KDE(x)")
    print("mean_SGDE = ", learner.mean(), " ~ ", kde.mean(), " = mean_KDE")
    print("var_SGDE = ", learner.variance(), " ~ ", kde.variance(), " = var_KDE")
    
    # Print the covariances
    C = sg.DataMatrix(grid.dim_, grid.dim_)
    print("----------------------- Cov_SGDE -----------------------")
    learner.cov(C)
    print(C)
    print("----------------------- Cov_KDE -----------------------")
    kde.cov(C)
    print(C)
    
    #
    # Apply the inverse Rosenblatt transformation to a matrix of random points. To
    # do this, first generate the random points uniformly, then initialize an
    # inverse Rosenblatt transformation operation and apply it to the points.
    # Finally, print the calculated values.
    #
    print("-----------------------------------------------")
    opInvRos = sg.createOperationInverseRosenblattTransformation(learner.getGrid())
    points = sg.DataMatrix(randu_mat(12, grid.dim_))
    print(points)
    
    pointsCdf = sg.DataMatrix(points.getNrows(), points.getNcols())
    opInvRos.doTransformation(learner.getSurpluses(), points, pointsCdf)
    
    #
    # To check whether the results are correct, perform a Rosenblatt transformation on
    # the data created by the inverse Rosenblatt transformation above
    # and print the calculated values.
    #
    points.setAll(0.0)
    opRos = sg.createOperationRosenblattTransformation(learner.getGrid())
    opRos.doTransformation(learner.getSurpluses(), pointsCdf, points)
    
    print("-----------------------------------------------")
    print(pointsCdf)
    print("-----------------------------------------------")
    print(points)
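A conventional entry point for running the example (an assumption; the original script may invoke main() differently):

if __name__ == "__main__":
    main()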
Example #25
 def unitSample(self):
     dv = pysgpp.DataVector(self.k)
     self.gen.getSample(dv)
     return [dv[i] for i in range(self.k)]
Example #26
## With the iterative grid generator, we generate adaptively a sparse grid.
printLine()
print("Generating grid...\n")

if not gridGen.generate():
    print("Grid generation failed, exiting.")
    sys.exit(1)

## Then, we hierarchize the function values to get hierarchical B-spline
## coefficients of the B-spline sparse grid interpolant
## \f$\tilde{f}\colon [0, 1]^d \to \mathbb{R}\f$.
printLine()
print("Hierarchizing...\n")
functionValues = gridGen.getFunctionValues()
coeffs = pysgpp.DataVector(len(functionValues))
hierSLE = pysgpp.OptHierarchisationSLE(grid)
sleSolver = pysgpp.OptAutoSLESolver()

# solve linear system
if not sleSolver.solve(hierSLE, gridGen.getFunctionValues(), coeffs):
    print("Solving failed, exiting.")
    sys.exit(1)

## We define the interpolant \f$\tilde{f}\f$ and its gradient
## \f$\nabla\tilde{f}\f$ for use with the gradient method (steepest descent).
## Of course, one can also use other optimization algorithms from
## sgpp::optimization::optimizer.
printLine()
print("Optimizing smooth interpolant...\n")
ft = pysgpp.OptInterpolantScalarFunction(grid, coeffs)
Example #27
def example2():
    ## This time, we use points whose number grows exponentially with the level, which makes them
    ## nested. Nested means that the set of grid points at one level is a subset of the set of grid
    ## points at the next level. Nesting can drastically reduce the number of needed function
    ## evaluations. Using these grid points, we will do polynomial interpolation at a single point.
    ## (The Clenshaw-Curtis variant is kept as a comment; the code uses L2-Leja points instead.)
    #operation = pysgpp.CombigridOperation.createExpClenshawCurtisPolynomialInterpolation(d, func)
    operation = pysgpp.CombigridOperation.createExpL2LejaPolynomialInterpolation(
        d, func)

    ## Now create a point where to evaluate the interpolated function:
    evaluationPoint = pysgpp.DataVector([0.1572, 0.6627, 0.2378])

    ## We can now evaluate the interpolation at this point (using 3 as a bound for the 1-norm of the
    ## level multi-index):
    result = operation.evaluate(3, evaluationPoint)

    ## Now compare the result to the actual function value:
    print("Interpolation result: " + str(result) + ", function value: " +
          str(func(evaluationPoint)))

    ## Again, print the number of function evaluations:
    print("Function evaluations: " + str(operation.numGridPoints()))

    ## Now, let's do another (more sophisticated) evaluation at a different point, so change the point
    ## and re-set the parameter. This method will automatically clear all intermediate values that
    ## have been computed internally up to now.
    evaluationPoint[0] = 0.4444
    print("Target function value: " + str(func(evaluationPoint)))
    operation.setParameters(evaluationPoint)

    ## The level manager provides more options for combigrid evaluation, so let's get it:
    levelManager = operation.getLevelManager()

    ## We can add regular levels like before:
    levelManager.addRegularLevels(3)

    ## The result can be fetched from the CombigridOperation:
    print("Regular result 1: " + str(operation.getResult()))
    print("Total function evaluations: " + str(operation.numGridPoints()))

    ## We can also add more points in a regular structure, using at most 50 new function evaluations.
    ## Most level-adding variants of levelManager also have a parallelized version. This version
    ## executes the calls to func in parallel with a specified number of threads, which is okay here
    ## since func supports parallel evaluations. Since func takes very little time to evaluate and the
    ## parallelization only concerns function evaluations and not the computations on the resulting
    ## function values, parallel evaluation is not actually useful in this case.
    ## We will use 4 threads for the function evaluations.
    levelManager.addRegularLevelsByNumPointsParallel(50, 4)
    print("Regular result 2: " + str(operation.getResult()))
    print("Total function evaluations: " + str(operation.numGridPoints()))

    ## We can also use adaptive level generation. The adaption strategy depends on the subclass of
    ## LevelManager that is used. If you do not want to use the default LevelManager, you can specify
    ## your own LevelManager:
    operation.setLevelManager(pysgpp.AveragingLevelManager())
    levelManager = operation.getLevelManager()

    ## It was necessary to use setLevelManager(), because this links the LevelManager to the
    ## computation. Now, let's add at most 60 more function evaluations adaptively.
    ## Note that the adaption here is only based on the result at our single evaluation point, which
    ## might give inaccurate results. The same holds for quadrature.
    ## In practice, you should probably do an interpolation at a lot of Monte-Carlo points via
    ## CombigridMultiOperation (cf. Example 3) and then transfer the generated level structure to
    ## another CombigridOperation or CombigridMultiOperation for your actual evaluation (cf. Example
    ## 4).
    levelManager.addLevelsAdaptive(60)
    print("Adaptive result: " + str(operation.getResult()))
    print("Total function evaluations: " + str(operation.numGridPoints()))

    ## We can also fetch the used grid points and plot the grid:
    grid = levelManager.getGridPointMatrix()
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    gridList = [[grid.get(r, c) for c in range(grid.getNcols())]
                for r in range(grid.getNrows())]

    ax.scatter(gridList[0], gridList[1], gridList[2], c='r', marker='o')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    plt.show()
Example #28
 def __call__(self, x):
   if self.d == 1 and not isinstance(x, list):
       x = [x]
   return self.opeval.eval(self.alpha, pysgpp.DataVector(x))
Example #29
def generate_friedman1(seed):
    (X, y) = data.make_friedman1(n_samples=10000, random_state=seed, noise=1.0)
    y = sg.DataVector(y)
    X = sg.DataMatrix(X)
    return X, y
Example #30
means = [m / meanNormalizingFactor for m in means]

### Multiply everything by 400 so that we get real scale, not model scale ###
for p in range(len(percentiles)):
    for t in range(numTimeSteps):
        percentiles[p, t] *= 400
maxTimeline = [maxTimeline[t]*400 for t in range(numTimeSteps)]
means = [means[t]*400 for t in range(numTimeSteps)]
mcMeans *= 400

print(
    f'average mean error: {np.average(np.abs(means-reference_means))}     worst diff {np.max(np.abs(means-reference_means))}')

# Calculate VARIANCES
if calcVar:
    dummyMeanSquares = pysgpp.DataVector(1)
    dummyMeans = pysgpp.DataVector(1)
    start = time.time()
    variances = reSurf.getVariances(pdfs, quadOrder, dummyMeans, dummyMeanSquares)
    variances_py = [variances[t] for t in range(numTimeSteps)]
    # variances scale with the square of the length-scale factor
    variances_py = [v * 400 ** 2 for v in variances_py]
    print(f'Calculating the variances took {time.time()-start}s')
    np.savetxt('/home/rehmemk/git/anugasgpp/Okushiri/plots/variances.txt', variances_py)
else:
    if plotVar:
        print('LOADING VARIANCES FROM FILE. NO GUARANTEES FOR THEM MATCHING THE CURRENT CALCULATIONS!')
        variances_py = np.loadtxt('/home/rehmemk/git/anugasgpp/Okushiri/plots/variances.txt')


### Plotting ###
# note: this rebinds the name 'time', shadowing the time module used above
time = [t * 22.5 / 450 for t in range(451)]