import openturns as ot
import openturns.testing as ott


def test_parameters_iso():

    scale = []
    amplitude = 1.0
    extraParameter = []

    # model 1
    atom_ex = ot.IsotropicCovarianceModel(ot.MaternModel(), 2)
    atom_ex.setScale([5])
    atom_ex.setAmplitude([1.5])
    scale.append(5)
    amplitude *= 1.5
    extraParameter.append(atom_ex.getKernel().getFullParameter()[-1])

    # model 2
    m = ot.MaternModel()
    m.setNu(2.5)
    m.setScale([3])
    m.setAmplitude([3])
    scale.append(3)
    amplitude *= 3
    extraParameter.append(m.getNu())

    # model 3
    atom = ot.IsotropicCovarianceModel(ot.AbsoluteExponential(), 2)
    atom.setScale([2])
    atom.setAmplitude([2.5])
    scale.append(2)
    amplitude *= 2.5

    model = ot.ProductCovarianceModel([atom_ex, m, atom])

    ott.assert_almost_equal(model.getScale(), scale, 1e-16, 1e-16)
    ott.assert_almost_equal(model.getAmplitude(), [amplitude], 1e-16, 1e-16)
    ott.assert_almost_equal(model.getFullParameter(),
                            scale + [amplitude] + extraParameter, 1e-16, 1e-16)

    # active parameter should be scale + amplitude
    ott.assert_almost_equal(model.getActiveParameter(),
                            [0, 1, 2, 3], 1e-16, 1e-16)

    # setting new parameters
    extraParameter = [2.5, 0.5]
    model.setFullParameter([6, 7, 8, 2] + extraParameter)

    coll = model.getCollection()
    ott.assert_almost_equal(coll[0].getScale()[0], 6, 1e-16, 1e-16)
    ott.assert_almost_equal(coll[1].getScale()[0], 7, 1e-16, 1e-16)
    ott.assert_almost_equal(coll[2].getScale()[0], 8, 1e-16, 1e-16)
    ott.assert_almost_equal(model.getAmplitude()[0], 2, 1e-16, 1e-16)
    ott.assert_almost_equal(coll[0].getFullParameter()[-1],
                            extraParameter[0], 1e-16, 1e-16)
    ott.assert_almost_equal(coll[1].getFullParameter()[-1],
                            extraParameter[1], 1e-16, 1e-16)

    # checking the active parameter setting
    model.setActiveParameter([0, 1, 2, 3, 5])
    ott.assert_almost_equal(model.getParameter(),
                            [6, 7, 8, 2, extraParameter[-1]], 1e-16, 1e-16)
Example No. 2
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
from math import sqrt

mesh = ot.IntervalMesher([256]).build(ot.Interval(-1.0, 1.0))
threshold = 0.001
factory = ot.KarhunenLoeveP1Factory(mesh, threshold)
model = ot.AbsoluteExponential(1, 1.0)
ev = ot.NumericalPoint()
modes = factory.buildAsProcessSample(model, ev)
for i in range(modes.getSize()):
    modes[i] = ot.Field(mesh, modes[i].getValues() * [sqrt(ev[i])])
g = modes.drawMarginal(0)
g.setXTitle("$t$")
g.setYTitle(r"$\sqrt{\lambda_n}\phi_n$")

fig = plt.figure(figsize=(6, 4))
plt.suptitle("P1 approx. of KL expansion for $C(s,t)=e^{-|s-t|}$")
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
View(g, figure=fig, axes=[axis], add_legend=False)
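# Hedged sketch (assumes a recent OpenTURNS): KarhunenLoeveP1Factory and
# NumericalPoint above belong to an older release; the same scaled modes can
# be obtained with KarhunenLoeveP1Algorithm, as in the later examples of this
# collection.
algo = ot.KarhunenLoeveP1Algorithm(mesh, ot.AbsoluteExponential([1.0]), threshold)
algo.run()
scaled_modes = algo.getResult().getScaledModesAsProcessSample()
g2 = scaled_modes.drawMarginal(0)  # same curves as the manually scaled modes above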
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

try:
    mesh = ot.IntervalMesher([9]).build(ot.Interval(-1.0, 1.0))
    cov1D = ot.AbsoluteExponential([1.0])
    algo = ot.KarhunenLoeveP1Algorithm(mesh, cov1D, 0.0)
    algo.run()
    result = algo.getResult()
    lambd = result.getEigenValues()
    KLModes = result.getModesAsProcessSample()
    print("KL modes=", KLModes)
    print("KL eigenvalues=", lambd)
    process = ot.TemporalNormalProcess(cov1D, KLModes.getMesh())
    coefficients = result.project(process.getSample(10))
    print("KL coefficients=", coefficients)
    KLFunctions = result.getModes()
    print("KL functions=", KLFunctions)
    print("KL lift=", result.lift(coefficients[0]))
    print("KL lift as field=", result.liftAsField(coefficients[0]))
    R = ot.CorrelationMatrix(2)
    R[0, 1] = 0.5
    scale = [1.0]
    amplitude = [1.0, 2.0]
    cov2D = ot.ExponentialModel(scale, amplitude, R)
    algo = ot.KarhunenLoeveP1Algorithm(mesh, cov2D, 0.0)
    algo.run()
    result = algo.getResult()
    lambd = result.getEigenValues()
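    # The snippet is truncated here; the lines below mirror the 1D block above
    # and close the try with a generic handler so the excerpt stays runnable
    # (an assumption, not part of the original script).
    KLModes = result.getModesAsProcessSample()
    print("KL modes=", KLModes)
    print("KL eigenvalues=", lambd)

except:
    import sys
    print("KarhunenLoeveP1Algorithm example failed:",
          sys.exc_info()[0], sys.exc_info()[1])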
Example No. 4
myDefautModel = ot.SquaredExponential([2.0], [3.0])
print('myDefautModel = ', myDefautModel)
test_model(myDefautModel)

myModel = ot.SquaredExponential([2.0] * inputDimension, [3.0])
test_model(myModel)

myDefautModel = ot.GeneralizedExponential([2.0], [3.0], 1.5)
print('myDefautModel = ', myDefautModel)
test_model(myDefautModel)

myModel = ot.GeneralizedExponential([2.0] * inputDimension, [3.0], 1.5)
test_model(myModel)

myDefautModel = ot.AbsoluteExponential([2.0], [3.0])
print('myDefautModel = ', myDefautModel)
test_model(myDefautModel)

myModel = ot.AbsoluteExponential([2.0] * inputDimension, [3.0])
test_model(myModel)

myDefautModel = ot.MaternModel([2.0], [3.0], 1.5)
print('myDefautModel = ', myDefautModel)
test_model(myDefautModel)

myModel = ot.MaternModel([2.0] * inputDimension, [3.0], 1.5)
test_model(myModel)

myDefautModel = ot.ExponentiallyDampedCosineModel([2.0], [3.0], 1.5)
print('myDefautModel = ', myDefautModel)
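# Note: test_model is defined elsewhere in the original test script and is not
# part of this excerpt. A minimal, hypothetical stand-in (names and checks are
# assumptions) that only evaluates the covariance model could be:
def test_model(model):
    dim = model.getInputDimension()
    s = [0.5] * dim
    t = [1.5] * dim
    # evaluating a covariance model at (s, t) returns a covariance matrix
    print(model.getClassName(), model(s, t))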
Example No. 5
from __future__ import print_function
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt

ot.Log.Show(ot.Log.NONE)

# %%
# define a covariance model
defaultDimension = 1
# Amplitude values
amplitude = [1.0] * defaultDimension
# Scale values
scale = [1.0] * defaultDimension
# Covariance model
myModel = ot.AbsoluteExponential(scale, amplitude)

# %%
# define a mesh
tmin = 0.0
step = 0.1
n = 11
myTimeGrid = ot.RegularGrid(tmin, step, n)

# %%
# create the process
process = ot.GaussianProcess(myModel, myTimeGrid)
print(process)

# %%
# draw a sample
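# Hedged sketch of the truncated step: draw a few realizations of the process
# and display them with the viewer module imported above.
sample = process.getSample(6)
graph = sample.drawMarginal(0)
graph.setTitle("AbsoluteExponential process realizations")
view = viewer.View(graph)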
Example No. 6
myDefautModel = ot.SquaredExponential()
print("myDefautModel = ",  myDefautModel)


myModel = ot.SquaredExponential(spatialDimension)
test_model(myModel)


myDefautModel = ot.GeneralizedExponential()
print("myDefautModel = ",  myDefautModel)

myModel = ot.GeneralizedExponential(spatialDimension, 10.0, 1.5)
test_model(myModel)


myDefautModel = ot.AbsoluteExponential()
print("myDefautModel = ",  myDefautModel)

myModel = ot.AbsoluteExponential(spatialDimension)
test_model(myModel)


myDefautModel = ot.MaternModel()
print("myDefautModel = ",  myDefautModel)

myModel = ot.MaternModel(spatialDimension, 8.0, 2.0)
test_model(myModel)


myDefautModel = ot.ProductCovarianceModel()
print("myDefautModel = ",  myDefautModel)
Example No. 7
    # Set Numerical precision to 4
    ot.PlatformInfo.SetNumericalPrecision(4)
    sampleSize = 40
    spatialDimension = 1

    # Create the function to estimate
    model = ot.SymbolicFunction(["x0"], ["x0"])

    X = ot.Sample(sampleSize, spatialDimension)
    for i in range(sampleSize):
        X[i, 0] = 3.0 + (8.0 * i) / sampleSize
    Y = model(X)

    # Add a small noise to data
    Y += ot.GaussianProcess(ot.AbsoluteExponential(
        [0.1], [0.2]), ot.Mesh(X)).getRealization().getValues()

    basis = ot.LinearBasisFactory(spatialDimension).build()
    # Case of a misspecified covariance model
    covarianceModel = ot.DiracCovarianceModel(spatialDimension)
    print("===================================================\n")
    algo = ot.GeneralLinearModelAlgorithm(X, Y, covarianceModel, basis)
    algo.run()

    result = algo.getResult()
    print("\ncovariance (dirac, optimized)=", result.getCovarianceModel())
    print("trend (dirac, optimized)=", result.getTrendCoefficients())
    print("===================================================\n")
    # Now without estimating covariance parameters
    basis = ot.LinearBasisFactory(spatialDimension).build()
    covarianceModel = ot.DiracCovarianceModel(spatialDimension)
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
covarianceModel = ot.AbsoluteExponential()
if covarianceModel.getSpatialDimension() == 1:
    scale = covarianceModel.getScale()[0]
    if covarianceModel.isStationary():

        def f(x):
            return [covarianceModel(x)[0, 0]]

        func = ot.PythonFunction(1, 1, f)
        func.setDescription(['$tau$', '$cov$'])
        cov_graph = func.draw(-3.0 * scale, 3.0 * scale, 129)
        fig = plt.figure(figsize=(10, 4))
        plt.suptitle(str(covarianceModel))
        cov_axis = fig.add_subplot(111)
        View(cov_graph, figure=fig, axes=[cov_axis], add_legend=False)
    else:

        def f(x):
            return [covarianceModel([x[0]], [x[1]])[0, 0]]

        func = ot.PythonFunction(2, 1, f)
        func.setDescription(['$s$', '$t$', '$cov$'])
        cov_graph = func.draw([-3.0 * scale] * 2, [3.0 * scale] * 2, [129] * 2)
        fig = plt.figure(figsize=(10, 4))
        plt.suptitle(str(covarianceModel))
        cov_axis = fig.add_subplot(111)
        View(cov_graph, figure=fig, axes=[cov_axis], add_legend=False)
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

try:
    mesh = ot.IntervalMesher(ot.Indices(1, 9)).build(ot.Interval(-1.0, 1.0))
    factory = ot.KarhunenLoeveP1Factory(mesh, 0.0)
    eigenValues = ot.NumericalPoint()
    KLModes = factory.buildAsProcessSample(ot.AbsoluteExponential([1.0]),
                                           eigenValues)
    print("KL modes=", KLModes)
    print("KL eigenvalues=", eigenValues)
    cov1D = ot.AbsoluteExponential([1.0])
    KLFunctions = factory.build(cov1D, eigenValues)
    print("KL functions=", KLFunctions)
    print("KL eigenvalues=", eigenValues)
    R = ot.CorrelationMatrix(2, [1.0, 0.5, 0.5, 1.0])
    scale = [1.0]
    amplitude = [1.0, 2.0]
    cov2D = ot.ExponentialModel(scale, amplitude, R)
    KLFunctions = factory.build(cov2D, eigenValues)
    print("KL functions=", KLFunctions)
    print("KL eigenvalues=", eigenValues)

except:
    import sys
    print("t_KarhunenLoeveP1Factory_std.py",
          sys.exc_info()[0],
          sys.exc_info()[1])
Example No. 10
# %%
from __future__ import print_function
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt
ot.Log.Show(ot.Log.NONE)

# %%
# Define the covariance model:

# %%
dimension = 1
amplitude = [1.0] * dimension
scale = [10] * dimension
covarianceModel = ot.AbsoluteExponential(scale, amplitude)

# %%
# Define the time grid on which we want to sample the Gaussian process:

# %%
# define a mesh
tmin = 0.0
step = 0.01
n = 10001
timeGrid = ot.RegularGrid(tmin, step, n)

# %%
# Finally, define the Gaussian process:

# %%
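# Hedged sketch of the truncated step: build the Gaussian process from the
# covariance model and time grid defined above, as in the other examples of
# this collection.
process = ot.GaussianProcess(covarianceModel, timeGrid)
print(process)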
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
from math import sqrt

mesh = ot.IntervalMesher([128]).build(ot.Interval(-1.0, 1.0))
threshold = 0.001
covariance = ot.AbsoluteExponential([1.0])
algo = ot.KarhunenLoeveP1Algorithm(mesh, covariance, threshold)
algo.run()
process = ot.GaussianProcess(covariance, mesh)
sample = process.getSample(100)
validation = ot.KarhunenLoeveValidation(sample, algo.getResult())
g = validation.drawValidation()

fig = plt.figure(figsize=(6, 4))
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
View(g, figure=fig, axes=[axis], add_legend=False)
    # Set Numerical precision to 4
    ot.PlatformInfo.SetNumericalPrecision(4)
    sampleSize = 40
    spatialDimension = 1

    # Create the function to estimate
    model = ot.SymbolicFunction(["x0"], ["x0"])

    X = ot.NumericalSample(sampleSize, spatialDimension)
    for i in range(sampleSize):
        X[i, 0] = 3.0 + (8.0 * i) / sampleSize
    Y = model(X)

    # Add a small noise to data
    Y += ot.TemporalNormalProcess(ot.AbsoluteExponential([0.1], [0.2]),
                                  ot.Mesh(X)).getRealization().getValues()

    basis = ot.LinearBasisFactory(spatialDimension).build()
    # Case of a misspecified covariance model
    covarianceModel = ot.DiracCovarianceModel(spatialDimension)
    print("===================================================\n")
    algo = ot.GeneralizedLinearModelAlgorithm(X, Y, covarianceModel, basis)
    algo.run()

    result = algo.getResult()
    print("\ncovariance (dirac, optimized)=", result.getCovarianceModel())
    print("trend (dirac, optimized)=", result.getTrendCoefficients())
    print("===================================================\n")
    # Now without estimating covariance parameters
    basis = ot.LinearBasisFactory(spatialDimension).build()
Example No. 13
# 2) GeneralizedExponential
myModel = ot.GeneralizedExponential([2.0], [3.0], 1.5)
ott.assert_almost_equal(myModel.getScale(), [2], 0, 0)
ott.assert_almost_equal(myModel.getAmplitude(), [3], 0, 0)
ott.assert_almost_equal(myModel.getP(), 1.5, 0, 0)
test_model(myModel)

myModel = ot.GeneralizedExponential([2.0] * inputDimension, [3.0], 1.5)
ott.assert_almost_equal(myModel.getScale(), [2, 2], 0, 0)
ott.assert_almost_equal(myModel.getAmplitude(), [3], 0, 0)
ott.assert_almost_equal(myModel.getP(), 1.5, 0, 0)
test_model(myModel)

# 3) AbsoluteExponential
myModel = ot.AbsoluteExponential([2.0], [3.0])
ott.assert_almost_equal(myModel.getScale(), [2], 0, 0)
ott.assert_almost_equal(myModel.getAmplitude(), [3], 0, 0)
test_model(myModel)

myModel = ot.AbsoluteExponential([2.0] * inputDimension, [3.0])
ott.assert_almost_equal(myModel.getScale(), [2, 2], 0, 0)
ott.assert_almost_equal(myModel.getAmplitude(), [3], 0, 0)
test_model(myModel)

# 4) MaternModel
myModel = ot.MaternModel([2.0], [3.0], 1.5)
ott.assert_almost_equal(myModel.getScale(), [2], 0, 0)
ott.assert_almost_equal(myModel.getAmplitude(), [3], 0, 0)
ott.assert_almost_equal(myModel.getNu(), 1.5, 0, 0)
test_model(myModel)
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

try:
    mesh = ot.IntervalMesher(ot.Indices(1, 9)).build(ot.Interval(-1.0, 1.0))
    factory = ot.KarhunenLoeveP1Factory(mesh, 0.0)
    eigenValues = ot.NumericalPoint()
    KLModes = factory.buildAsProcessSample(ot.AbsoluteExponential(1, 1.0),
                                           eigenValues)
    print("KL modes=", KLModes)
    print("KL eigenvalues=", eigenValues)
    KLFunctions = factory.build(ot.AbsoluteExponential(1, 1.0), eigenValues)
    print("KL functions=", KLFunctions)
    print("KL eigenvalues=", eigenValues)

except:
    import sys
    print("t_KarhunenLoeveP1Factory_std.py", sys.exc_info()[0], sys.exc_info()[1])
Example No. 15
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
from math import sqrt

mesh = ot.IntervalMesher([128]).build(ot.Interval(-1.0, 1.0))
threshold = 0.001
model = ot.AbsoluteExponential([1.0])
algo = ot.KarhunenLoeveP1Algorithm(mesh, model, threshold)
algo.run()
ev = algo.getResult().getEigenvalues()
modes = algo.getResult().getScaledModesAsProcessSample()
g = modes.drawMarginal(0)
g.setXTitle("$t$")
g.setYTitle(r"$\sqrt{\lambda_n}\phi_n$")
g.setTitle("P1 approx. of KL expansion for $C(s,t)=e^{-|s-t|}$")

fig = plt.figure(figsize=(6, 4))
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
View(g, figure=fig, axes=[axis], add_legend=False)
ot.Log.Show(ot.Log.NONE)

# %%
# Input model
print("Create the input process")
# Domain bound
a = 1
# Reference correlation length
b = 0.5
# Number of vertices in the mesh
N = 100
# Bandwidth of the smoothers
h = 0.05

mesh = ot.IntervalMesher([N - 1]).build(ot.Interval(-a, a))
covariance_X = ot.AbsoluteExponential([b])
process_X = ot.GaussianProcess(covariance_X, mesh)


# %%
# for some pretty graphs
def drawKL(scaledKL, KLev, mesh, title="Scaled KL modes"):
    graph_modes = scaledKL.drawMarginal()
    graph_modes.setTitle(title + " scaled KL modes")
    graph_modes.setXTitle('$x$')
    graph_modes.setYTitle(r'$\sqrt{\lambda_i}\phi_i$')
    data_ev = [[i, KLev[i]] for i in range(scaledKL.getSize())]
    graph_ev = ot.Graph()
    graph_ev.add(ot.Curve(data_ev))
    graph_ev.add(ot.Cloud(data_ev))
    graph_ev.setTitle(title + " KL eigenvalues")
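    # The original helper is cut off here; a plausible completion (an
    # assumption) labels the eigenvalue plot and returns both graphs for the
    # caller to display.
    graph_ev.setXTitle('$k$')
    graph_ev.setYTitle(r'$\lambda_k$')
    return graph_modes, graph_ev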
Example No. 17
# Set Numerical precision to 4
ot.PlatformInfo.SetNumericalPrecision(4)
sampleSize = 40
inputDimension = 1

# Create the function to estimate
model = ot.SymbolicFunction(["x0"], ["x0"])

X = ot.Sample(sampleSize, inputDimension)
for i in range(sampleSize):
    X[i, 0] = 3.0 + (8.0 * i) / sampleSize
Y = model(X)

# Add a small noise to data
Y += ot.GaussianProcess(ot.AbsoluteExponential([0.1], [0.2]),
                        ot.Mesh(X)).getRealization().getValues()

basis = ot.LinearBasisFactory(inputDimension).build()
# Case of a misspecified covariance model
covarianceModel = ot.DiracCovarianceModel(inputDimension)
print("===================================================\n")
algo = ot.GeneralLinearModelAlgorithm(X, Y, covarianceModel, basis)
algo.run()

result = algo.getResult()
print("\ncovariance (dirac, optimized)=", result.getCovarianceModel())
print("trend (dirac, optimized)=", result.getTrendCoefficients())
print("===================================================\n")

# Now without estimating covariance parameters
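# Hedged sketch of the truncated step (setOptimizeParameters is assumed to be
# available on GeneralLinearModelAlgorithm): rebuild the algorithm and keep
# the Dirac covariance parameters fixed instead of optimizing them.
basis = ot.LinearBasisFactory(inputDimension).build()
covarianceModel = ot.DiracCovarianceModel(inputDimension)
algo = ot.GeneralLinearModelAlgorithm(X, Y, covarianceModel, basis)
algo.setOptimizeParameters(False)
algo.run()
result = algo.getResult()
print("covariance (dirac, not optimized)=", result.getCovarianceModel())
print("trend (dirac, not optimized)=", result.getTrendCoefficients())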
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
if ot.AbsoluteExponential().__class__.__name__ == 'ExponentialModel':
    covarianceModel = ot.ExponentialModel([0.5], [5.0])
elif ot.AbsoluteExponential().__class__.__name__ == 'GeneralizedExponential':
    covarianceModel = ot.GeneralizedExponential([2.0], [3.0], 1.5)
elif ot.AbsoluteExponential().__class__.__name__ == 'ProductCovarianceModel':
    amplitude = [1.0]
    scale1 = [4.0]
    scale2 = [4.0]
    cov1 = ot.ExponentialModel(scale1, amplitude)
    cov2 = ot.ExponentialModel(scale2, amplitude)
    covarianceModel = ot.ProductCovarianceModel([cov1, cov2])
elif ot.AbsoluteExponential().__class__.__name__ == 'RankMCovarianceModel':
    variance = [1.0, 2.0]
    basis = ot.LinearBasisFactory().build()
    covarianceModel = ot.RankMCovarianceModel(variance, basis)
else:
    covarianceModel = ot.AbsoluteExponential()
title = str(covarianceModel)[:100]
if covarianceModel.getInputDimension() == 1:
    scale = covarianceModel.getScale()[0]
    if covarianceModel.isStationary():

        def f(x):
            return [covarianceModel(x)[0, 0]]

        func = ot.PythonFunction(1, 1, f)
        func.setDescription(['$tau$', '$cov$'])
        cov_graph = func.draw(-3.0 * scale, 3.0 * scale, 129)