def get_process_kl_decomposition(mean, coef_var=None, amplitude=None, scale=0,
                                 nu=1, mesh=None, dimension=1, name='',
                                 threshold=1e-3):
    """Karhunen-Loeve decomposition of a Matern process defined on `mesh`."""
    # Matern covariance model only: the amplitude is either given directly or
    # derived from the mean and the coefficient of variation.
    if amplitude is None and coef_var is not None:
        amplitude = [float(mean * coef_var)] * dimension
    elif amplitude is not None:
        amplitude = [float(amplitude)] * dimension
    else:
        raise ValueError("either 'amplitude' or 'coef_var' must be provided")
    scale = [float(scale)] * dimension
    model = ot.MaternModel(scale, amplitude, float(nu))
    # Karhunen-Loeve decomposition of the process (P1 finite-element approximation)
    algorithm = ot.KarhunenLoeveP1Algorithm(mesh, model, threshold)
    algorithm.run()
    results = algorithm.getResult()
    results.setName(name)
    return results
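# Illustrative call of the helper above (not part of the original snippet; the
# mesh and Matern parameters below are arbitrary assumptions, and openturns is
# assumed imported as ot):
example_mesh = ot.IntervalMesher([100]).build(ot.Interval(0.0, 10.0))
kl_result = get_process_kl_decomposition(mean=5.0, coef_var=0.1, scale=2.0,
                                          nu=2.5, mesh=example_mesh,
                                          name='matern_kl')
print(kl_result.getEigenvalues())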
Example #2
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
from math import sqrt

mesh = ot.IntervalMesher([128]).build(ot.Interval(-1.0, 1.0))
threshold = 0.001
model = ot.AbsoluteExponential([1.0])
algo = ot.KarhunenLoeveP1Algorithm(mesh, model, threshold)
algo.run()
ev = algo.getResult().getEigenvalues()
modes = algo.getResult().getScaledModesAsProcessSample()
g = modes.drawMarginal(0)
g.setXTitle("$t$")
g.setYTitle("$\sqrt{\lambda_n}\phi_n$")
g.setTitle("P1 approx. of KL expansion for $C(s,t)=e^{-|s-t|}$")

fig = plt.figure(figsize=(6, 4))
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
View(g, figure=fig, axes=[axis], add_legend=False)
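# Complementary sketch (an assumption, not part of the original example): the
# eigenvalues 'ev' computed above can be plotted to inspect the decay of the
# expansion; matplotlib is already imported as plt.
fig_ev = plt.figure(figsize=(6, 4))
plt.semilogy([ev[i] for i in range(ev.getSize())], "o-")
plt.xlabel("mode index $n$")
plt.ylabel(r"$\lambda_n$")
plt.title("Eigenvalue decay of the KL expansion")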
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

try:
    mesh = ot.IntervalMesher([9]).build(ot.Interval(-1.0, 1.0))
    cov1D = ot.AbsoluteExponential([1.0])
    algo = ot.KarhunenLoeveP1Algorithm(mesh, cov1D, 0.0)
    algo.run()
    result = algo.getResult()
    lambd = result.getEigenvalues()
    KLModes = result.getModesAsProcessSample()
    print("KL modes=", KLModes)
    print("KL eigenvalues=", lambd)
    process = ot.GaussianProcess(cov1D, KLModes.getMesh())
    coefficients = result.project(process.getSample(10))
    print("KL coefficients=", coefficients)
    KLFunctions = result.getModes()
    print("KL functions=", KLFunctions)
    print("KL lift=", result.lift(coefficients[0]))
    print("KL lift as field=", result.liftAsField(coefficients[0]))
    R = ot.CorrelationMatrix(2)
    R[0, 1] = 0.5
    scale = [1.0]
    amplitude = [1.0, 2.0]
    cov2D = ot.ExponentialModel(scale, amplitude, R)
    algo = ot.KarhunenLoeveP1Algorithm(mesh, cov2D, 0.0)
    algo.run()
    result = algo.getResult()
    lambd = result.getEigenvalues()
    graph_ev.setAxes(True)
    graph_ev.setGrid(True)
    graph_ev.setLogScale(2)
    bb = graph_ev.getBoundingBox()
    lower = bb.getLowerBound()
    lower[1] = 1.0e-7
    bb = ot.Interval(lower, bb.getUpperBound())
    graph_ev.setBoundingBox(bb)
    return graph_modes, graph_ev


# %%
# Karhunen-Loeve decomposition of the input process
print("Compute the decomposition of the input process")
threshold = 0.0001
algo_X = ot.KarhunenLoeveP1Algorithm(mesh, process_X.getCovarianceModel(),
                                     threshold)
algo_X.run()
result_X = algo_X.getResult()
phi_X = result_X.getScaledModesAsProcessSample()
lambda_X = result_X.getEigenvalues()

graph_modes_X, graph_ev_X = drawKL(phi_X, lambda_X, mesh, "X")
view = viewer.View(graph_modes_X)
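# The eigenvalue graph returned by drawKL can be displayed the same way
# (this display call is an assumption, not shown in the original excerpt):
view_ev = viewer.View(graph_ev_X)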

# %%
# Input database generation
print("Sample the input process")
size = 1000
sample_X = process_X.getSample(size)
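# %%
# A plausible next step (an assumption; the excerpt stops here): project the
# sampled trajectories onto the KL basis to get the reduced coefficients.
xi_X = result_X.project(sample_X)
print("KL coefficients sample:", xi_X.getSize(), "x", xi_X.getDimension())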

Example #5
# Reconstructed class header (the excerpt starts inside __init__): FUNC wraps
# the KL lift as an OpenTURNSPythonFunction; the dimensions below are assumed.
class FUNC(ot.OpenTURNSPythonFunction):
    def __init__(self, mesh, KLResult):
        super(FUNC, self).__init__(KLResult.getEigenvalues().getSize(),
                                   mesh.getVerticesNumber())
        self.KLResult_ = KLResult
        self.nc_ = 0

    def _exec(self, X):
        self.nc_ += 1
        print("nc=", self.nc_)
        return self.KLResult_.liftAsField(X).getValues().asPoint()


# Covariance model
covariance = ot.SquaredExponential([0.1] * 2)

print("Build KL quadrature")
ot.ResourceMap.SetAsScalar("KarhunenLoeveP1Algorithm-RegularizationFactor",
                           1.0e-12)
algo = ot.KarhunenLoeveP1Algorithm(mesh, covariance, threshold)
ot.Log.Show(ot.Log.INFO)
algo.run()
result = algo.getResult()

F = FUNC(mesh, result)
model = ot.Function(F)

dim = model.getInputDimension()
print("dim=", dim)
size = dim + 1
distribution = ot.ComposedDistribution([ot.Normal()] * dim)
weightedExperiment = ot.MonteCarloExperiment(distribution, size)
inSample, weights = weightedExperiment.generateWithWeights()
print("Sample model")
t0 = time()
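# A plausible continuation (an assumption): evaluate the wrapped KL-lift
# function on the Monte Carlo design and report the elapsed time; `time` is
# assumed to be imported from the time module, as the t0 call suggests.
outSample = model(inSample)
print("elapsed:", time() - t0, "s")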
try:
    f = ot.SymbolicFunction(['x1', 'x2'], ['x1+x2'])
    print('ok')
except:
    print('n/a')

# check that hmat library was found
print(' 8: h-matrix (hmat-oss)'.ljust(width), end=' ')
print('ok' if ot.HMatrixFactory.IsAvailable() else 'n/a')

# check that spectra library was found
print(' 9: iterative SVD (Spectra)'.ljust(width), end=' ')
ot.ResourceMap.SetAsString("KarhunenLoeveP1Algorithm-EigenvaluesSolver",
                           "SPECTRA")
mesh = ot.IntervalMesher([9]).build(ot.Interval(-1.0, 1.0))
cov1D = ot.AbsoluteExponential([1.0])
algo = ot.KarhunenLoeveP1Algorithm(mesh, cov1D, 1e-3)
try:
    algo.run()
    result = algo.getResult()
    lambd = result.getEigenvalues()
    print('ok')
except:
    print('n/a')

# check that TBB library was found
print(' 10: multithreading (TBB)'.ljust(width), end=' ')
print('ok' if ot.TBB.IsAvailable() else 'n/a')

# check that psutil was found
print('11: process control (psutil)'.ljust(width), end=' ')
try:
    import psutil
    print('ok')
except:
    print('n/a')
Example #7
ot.RandomGenerator_SetSeed(888)
field_1D = process_1D.getRealization()
field_2D = process_2D.getRealization()
scalar_0 = [scalar_distribution.getRealization()]

print('For field 1D:\n', field_1D, '\n')
print('For field 2D:\n', field_2D, '\n')
print('For scalar :\n', scalar_0, '\n')
output_dummy_0 = dummyFunction2Wrap(field_2D, field_1D, scalar_0)

print('Output is:\n', output_dummy_0)

## Now that we have defined our processes, their realizations and the corresponding
## output, we can create our aggregated object, wrap our function, and check that it
## behaves as expected.
### For that we first need the Karhunen-Loeve decomposition of the processes.
algo_kl_process_1D = ot.KarhunenLoeveP1Algorithm(
    mesh_1D, process_1D.getCovarianceModel())
algo_kl_process_1D.run()
kl_results_1D = algo_kl_process_1D.getResult()

algo_kl_process_2D = ot.KarhunenLoeveP1Algorithm(
    mesh_2D, process_2D.getCovarianceModel())
algo_kl_process_2D.run()
kl_results_2D = algo_kl_process_2D.getResult()

### Now let's compose our Karhunen Loeve Results and our distributions.
composedKLResultsAndDistributions = aklr.AggregatedKarhunenLoeveResults(
    [kl_results_2D, kl_results_1D, scalar_distribution])

### Now let's see if we manage to project and lift the realizations we had before.
realizationFields = [field_2D, field_1D, ot.Field(ot.Mesh(), [scalar_0[0]])]
projectedCoeffs = composedKLResultsAndDistributions.project(realizationFields)
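### The lift step mentioned above is not shown in this excerpt; a hypothetical
### counterpart call (method name assumed by analogy with
### KarhunenLoeveResult.liftAsField) would rebuild the fields and the scalar:
# liftedObjects = composedKLResultsAndDistributions.liftAsField(projectedCoeffs)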