Example no. 1
    def _exec(self, X):
        # Unpack the initial state y0 and the two parameters of the ODE
        y0 = X[0]
        a = X[1]
        b = X[2]
        # Freeze the parameters (a, b) of the right-hand side self.phi_
        phi_ab = ot.ParametricFunction(self.phi_, [2, 3], [a, b])
        # Make the time the remaining parameter, as expected by the ODE solver
        phi_t = ot.ParametricFunction(phi_ab, [0], [0.0])
        solver = ot.RungeKutta(phi_t)
        initialState = [y0]
        values = solver.solve(initialState, self.ticks_)
        return values * [1.0e-6]
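The method above relies on class attributes (`self.phi_`, `self.ticks_`) that are not shown. A self-contained sketch of the same pattern, with a hypothetical right-hand side and time grid, could look like this:

import openturns as ot

# Hypothetical right-hand side dy/dt = a*y + b*t, with inputs (t, y, a, b)
phi = ot.SymbolicFunction(['t', 'y', 'a', 'b'], ['a * y + b * t'])
a, b = -1.0, 0.5
phi_ab = ot.ParametricFunction(phi, [2, 3], [a, b])  # freeze a and b
phi_t = ot.ParametricFunction(phi_ab, [0], [0.0])    # time becomes the parameter
solver = ot.RungeKutta(phi_t)
ticks = [0.01 * i for i in range(101)]
values = solver.solve([1.0], ticks)                  # y(0) = 1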
Example no. 2
def buildFunction(beta, nu, N, t0):
    '''
       Build the transition function of the SIR model given the values of
       beta and nu
    '''
    f = ot.SymbolicFunction(['t', 'S', 'I', 'R'], [
        str(-beta / N) + '*S*I',                   # dS/dt
        str(beta / N) + '*S*I-' + str(nu) + '*I',  # dI/dt
        str(nu) + '*I'                             # dR/dt
    ])
    phi = ot.ParametricFunction(f, [0], [t0])
    return phi
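A minimal usage sketch (with hypothetical parameter values) showing how the returned transition function can be fed to an ODE solver:

import openturns as ot

beta, nu, N, t0 = 0.5, 0.1, 1000.0, 0.0  # hypothetical epidemic parameters
phi = buildFunction(beta, nu, N, t0)
solver = ot.RungeKutta(phi)
initialState = [990.0, 10.0, 0.0]        # S(t0), I(t0), R(t0)
timeGrid = ot.RegularGrid(t0, 1.0, 100)  # 100 steps of one time unit
values = solver.solve(initialState, timeGrid)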
Example no. 3
    def __init__(self, simulateur, annee, S, D):
        """
        Crée un modèle de pension probabiliste.
        
        Paramètres :
            simulateur : un SimulateurRetraite
            annee : un flottant, l'année de calcul de P
            S : un flottant, le solde financier en part de PIB
            D : un flottant positif, le montant des dépenses de retraites 
            en part de PIB
        
        Description :
            Crée un modèle de pension probabiliste pour le 
            ratio (pension moyenne) / (salaire moyen).
            
            Les entrées du modèle sont "As", "F", "TauC" 
            et la sortie est "P". 
            
            Les paramètres S et D sont fixés par le constructeur 
            de la classe au moment de la création de l'objet. 
            
            * S : le solde financier du système de retraites (% PIB)
            * D : le montant des dépenses (% PIB)
            * As : l'âge moyen de départ à la retraite défini par l'utilisateur
            * F  : facteur d'élasticité de report de l'âge de départ 
                (par exemple F=0.5)
            * TauC : le taux de chômage (par exemple TauC = 4.5)

            Les distributions des variables sont ot.Uniform 
            et indépendantes.

        Exemple :
            S = 0.0
            D = 0.14
            annee = 2050
            modele = ModelePensionProbabiliste(simulateur, annee, S, D)
            fonction = modele.getFonction()
            inputDistribution = modele.getInputDistribution()
        """
        # Create the full pension model: inputs = (S, D, As, F, TauC)
        modelePension = ot.Function(FonctionPension(simulateur, annee))
        # Create the reduced model from the full model: inputs = (As, F, TauC)
        indices = ot.Indices([0, 1])
        referencePoint = ot.Point([S, D])
        self.fonction = ot.ParametricFunction(modelePension, indices, referencePoint)
        # Distribution
        As = ot.Uniform(62.0, 66.0)
        F = ot.Uniform(0.25, 0.75)
        TauC = ot.Uniform(4.5, 10.0)
        self.inputDistribution = ot.ComposedDistribution([As, F, TauC])
        self.inputDistribution.setDescription(["As", "F", "TauC"])
        return
Example no. 4
    def __init__(self):
        # dimension
        self.dim = 3
        # Fixed parameters for the Ishigami function
        self.a = 7.0
        self.b = 0.1

        # First marginal : X1
        self.X1 = ot.Uniform(-m.pi, m.pi)
        self.X1.setName("X1")

        # Second marginal : X2
        self.X2 = ot.Uniform(-m.pi, m.pi)
        self.X2.setName("X2")

        # Third marginal : X3
        self.X3 = ot.Uniform(-m.pi, m.pi)
        self.X3.setName("X1")

        # Input distribution
        self.distributionX = ot.ComposedDistribution(
            [self.X1, self.X2, self.X3])
        self.distributionX.setDescription(['X1', 'X2', 'X3'])

        self.ishigami = ot.SymbolicFunction(
            ['X1', 'X2', 'X3', 'a', 'b'],
            ['sin(X1) + a * sin(X2)^2 + b * X3^4 * sin(X1)'])
        # The Ishigami model
        self.model = ot.ParametricFunction(self.ishigami, [3, 4],
                                           [self.a, self.b])

        self.expectation = self.a / 2.0
        self.variance = 1.0/2 + self.a**2/8.0 + \
            self.b*m.pi**4/5.0 + self.b**2*m.pi**8/18.0
        self.S1 = (1.0 / 2.0 + self.b * m.pi**4 / 5.0 +
                   self.b**2 * m.pi**8 / 50.0) / self.variance
        self.S2 = (self.a**2 / 8.0) / self.variance
        self.S3 = 0.0
        self.S12 = 0.0
        self.S13 = self.b**2 * m.pi**8 / 2.0 * (1.0 / 9.0 -
                                                1.0 / 25.0) / self.variance
        self.S23 = 0.0
        self.S123 = 0.0
        self.ST1 = self.S1 + self.S13
        self.ST2 = self.S2
        self.ST3 = self.S3 + self.S13
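For reference, the attributes above implement the classical closed-form moments and Sobol' indices of the Ishigami function:

\mathbb{E}[Y] = \frac{a}{2}, \qquad \operatorname{Var}(Y) = \frac{1}{2} + \frac{a^2}{8} + \frac{b\pi^4}{5} + \frac{b^2\pi^8}{18}

S_1 = \frac{1}{\operatorname{Var}(Y)}\left(\frac{1}{2} + \frac{b\pi^4}{5} + \frac{b^2\pi^8}{50}\right), \quad S_2 = \frac{a^2/8}{\operatorname{Var}(Y)}, \quad S_3 = 0, \quad S_{13} = \frac{b^2\pi^8}{2\operatorname{Var}(Y)}\left(\frac{1}{9} - \frac{1}{25}\right)

\mathrm{ST}_1 = S_1 + S_{13}, \qquad \mathrm{ST}_2 = S_2, \qquad \mathrm{ST}_3 = S_3 + S_{13}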
Example no. 5
def functionCrue4VarsStochastic(X):
    Q, Ks, Zv, Zm = X
    # 1. Creation of the problem function
    f8v = ot.PythonFunction(8, 1, functionCrue8vars)
    g = ot.ParametricFunction(f8v, [0, 1, 2, 3], X)
    # 2. Random vector definition
    Hd = ot.Uniform(4.,14.)
    Zb = ot.Uniform(50.,60.)
    L = ot.Uniform(1000.,10000.)
    B = ot.Uniform(50.,500.)
    inputvector = ot.ComposedDistribution([Hd, Zb, L, B])
    inputRV = ot.RandomVector(inputvector)
    S = ot.CompositeRandomVector(g, inputRV)
    # 3. Sample output
    sampleSize = 10
    outputSample = S.getSample(sampleSize)
    Smean = outputSample.computeMean()[0]
    return [Smean]
Example no. 6
    def _runMonteCarlo(self, defect):
        # set a parametric function where the first parameter = given defect
        g = ot.ParametricFunction(self._metamodel, [0], [defect])
        g = ot.MemoizeFunction(g)
        g.enableHistory()
        g.clearHistory()
        g.clearCache()
        output = ot.CompositeRandomVector(g,
                                          ot.RandomVector(self._distribution))
        event = ot.ThresholdEvent(output, ot.Greater(), self._detectionBoxCox)

        ##### Monte Carlo ########
        algo_MC = ot.ProbabilitySimulationAlgorithm(event)
        algo_MC.setMaximumOuterSampling(self._samplingSize)
        # set negative coef of variation to be sure the stopping criterion is the sampling size
        algo_MC.setMaximumCoefficientOfVariation(-1)
        algo_MC.run()
        return algo_MC.getResult()
    def __init__(self, a=7.0, b=0.1):
        """
        Create an Ishigami sensitivity problem.

        Parameters
        ----------
        a : float
            The first parameter.

        b : float
            The second parameter.

        Example
        -------
        problem = IshigamiSensitivityBenchmarkProblem()
        """

        # Define the function
        formula = ["sin(X1) + a * sin(X2)^2 + b * X3^4 * sin(X1)"]
        input_names = ["X1", "X2", "X3", "a", "b"]
        fullFunction = ot.SymbolicFunction(input_names, formula)
        indices = [3, 4]
        referencePoint = [a, b]
        function = ot.ParametricFunction(fullFunction, indices, referencePoint)

        # Define the distribution
        inputDimension = 3
        distributionList = [ot.Uniform(-np.pi, np.pi)] * inputDimension
        distribution = ot.ComposedDistribution(distributionList)

        name = "Ishigami"

        # Compute exact indices
        exact = self.ComputeIndices(a, b)
        firstOrderIndices = ot.Point([exact["S1"], exact["S2"], exact["S3"]])
        totalOrderIndices = ot.Point([exact["T1"], exact["T2"], exact["T3"]])

        super(IshigamiSensitivityBenchmarkProblem, self).__init__(
            name, distribution, function, firstOrderIndices, totalOrderIndices
        )

        return None
Example no. 8
    def buildCrossCutFunction(self, i, j):
        """
        Create the cross-cut parametric function for projection (i,j).

        The parametric function is the event function where the
        only free variables are (X[i], X[j]) and other variables
        are set to the mean point.

        We must have i < j, otherwise the function
        would be evaluated at the wrong input X.

        Parameters
        ----------
        i : int
            The index of the first marginal of the cross-cut.
        j : int
            The index of the second marginal of the cross-cut.

        Returns
        -------
        crosscutFunction : ot.Function
            The cross-cut function.
        """
        if j < i:
            raise ValueError("i=%d > j=%d." % (i, j))
        inputVector = self.event.getAntecedent()
        distribution = inputVector.getDistribution()
        mean = distribution.getMean()
        indices = []
        point = []
        for k in range(self.inputDimension):
            if k != i and k != j:
                indices.append(k)
                point.append(mean[k])
        crosscutFunction = ot.ParametricFunction(self.g, indices, point)
        return crosscutFunction
Example no. 9
# .. math::
#    f : \underline{X} \mapsto \underline{\underline{A}} ( \underline{X} - \underline{b} ) + \underline{c} + \frac{1}{2} \underline{X}^T \times \underline{\underline{\underline{M}}} \times \underline{X}
#

# %%
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt
import math as m
ot.Log.Show(ot.Log.NONE)

# %%
# create a quadratic function
inputDimension = 3
outputDimension = 2
center = [1.0] * inputDimension
constant = [-1.0, 2.0]  # c
linear = ot.Matrix(inputDimension, outputDimension)  # A
quadratic = ot.SymmetricTensor(inputDimension, outputDimension)  # M
quadratic[0, 0, 1] = 3.0
function = ot.QuadraticFunction(center, constant, linear, quadratic)
x = [7.0, 8.0, 9.0]
print(function(x))

# %%
# draw y1 with x1=2.0, x2=1.0, x0 in [0, 2]
graph = ot.ParametricFunction(
    function, [1, 2], [2.0, 1.0]).getMarginal(1).draw(0.0, 2.0)
view = viewer.View(graph)
plt.show()
Example no. 10
# %%
algo = ot.FunctionalChaosAlgorithm(inputSample, outputSample)
algo.run()
result = algo.getResult()
metamodel = result.getMetaModel()

# %%
# Plot the second output of our model depending on :math:`x_2` with :math:`x_1=0.5`. In order to do this, we create a `ParametricFunction` and set the value of :math:`x_1`. Then we use the `getMarginal` method to extract the second output (whose index is equal to 1).

# %%
x1index = 0
x1value = 0.5
x2min = -3.
x2max = 3.
outputIndex = 1
metamodelParametric = ot.ParametricFunction(metamodel, [x1index], [x1value])
graph = metamodelParametric.getMarginal(outputIndex).draw(x2min, x2max)
graph.setLegends(["Metamodel"])
modelParametric = ot.ParametricFunction(model, [x1index], [x1value])
curve = modelParametric.getMarginal(outputIndex).draw(x2min, x2max).getDrawable(0)
curve.setColor('red')
curve.setLegend("Model")
graph.add(curve)
graph.setLegendPosition("bottomright")
graph.setXTitle("X2")
graph.setTitle("Metamodel Validation, output #%d" % (outputIndex))
view = viewer.View(graph)

# %%
# We see that the metamodel approximately fits the model, except perhaps for extreme values of :math:`x_2`. However, there is a better way to validate the metamodel globally, using the `MetaModelValidation` class on a validation design of experiments.
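#
# A hedged sketch of that global validation step, assuming a fresh validation
# sample drawn from the same `distribution` and `model` objects used to build
# the training data, and using the three-argument `MetaModelValidation`
# constructor of older OpenTURNS versions:

# %%
n_valid = 100
X_valid = distribution.getSample(n_valid)
Y_valid = model(X_valid)
validation = ot.MetaModelValidation(X_valid, Y_valid, metamodel)
print(validation.computePredictivityFactor())   # Q2 predictivity coefficient
view = viewer.View(validation.drawValidation())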
Example no. 11
y_obs[5, 0] = 0.661725805623086
y_obs[6, 0] = -1.57581204592385
y_obs[7, 0] = -2.95308465670895
y_obs[8, 0] = -8.8878164296758
y_obs[9, 0] = -13.0812290405651
print('y_obs=', y_obs)

p = ot.Sample(obsSize, chainDim)
for i in range(obsSize):
    for j in range(chainDim):
        p[i, j] = (-2 + 5. * i / 9.)**j
print('p=', p)

fullModel = ot.SymbolicFunction(['p1', 'p2', 'p3', 'x1', 'x2', 'x3'],
                                ['p1*x1+p2*x2+p3*x3', '1.0'])
linkFunction = ot.ParametricFunction(fullModel, range(chainDim),
                                     [0.0] * chainDim)

# instrumental distribution
instrumental = ot.Uniform(-1., 1.)

# prior distribution
sigma0 = [10.0] * chainDim
Q0 = ot.CorrelationMatrix(chainDim)  # precision matrix
Q0_inv = ot.CorrelationMatrix(chainDim)  # variance matrix
for i in range(chainDim):
    Q0_inv[i, i] = sigma0[i] * sigma0[i]
    Q0[i, i] = 1.0 / Q0_inv[i, i]
print('Q0=', Q0)

mu0 = [0.0] * chainDim
prior = ot.Normal(mu0, Q0_inv)  # x0 ~ N(mu0, sigma0)
view = otv.View(graph)

# %%
# A common pre-processing step is to apply a transform on the input data before performing kriging.
# To do so, we write a linear transform of the input data: we center it at its mean and scale it to unit standard deviation. The mean and the standard deviation are then fixed to their values with the `ParametricFunction`. We build the inverse transform as well.
#
# We first compute the mean and standard deviation of the input data:
mean = Xtrain.computeMean()[0]
stdDev = Xtrain.computeStandardDeviation()[0]
print("Xtrain, mean : %.3f" % mean)
print("Xtrain, standard deviation : %.3f" % stdDev)

# %%
tf = ot.SymbolicFunction(['mu', 'sigma', 'x'], ['(x-mu)/sigma'])
itf = ot.SymbolicFunction(['mu', 'sigma', 'x'], ['sigma*x+mu'])
myInverseTransform = ot.ParametricFunction(itf, [0, 1], [mean, stdDev])
myTransform = ot.ParametricFunction(tf, [0, 1], [mean, stdDev])

# %%
# A constant basis
# ----------------
#
# In this paragraph we choose a basis constant for the kriging. There is only one unknown which is the
# value of the constant. The basis is built with the :class:`~openturns.ConstantBasisFactory` class.
basis = ot.ConstantBasisFactory(dimension).build()

# %%
# We build the kriging algorithm by giving it the transformed data, the output data, the covariance
# model and the basis.
algo = ot.KrigingAlgorithm(myTransform(Xtrain), Ytrain, covarianceModel, basis)
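
# %%
# A short follow-up sketch, assuming we then want a metamodel that accepts
# inputs on the original (untransformed) scale: run the algorithm and compose
# the resulting metamodel with the transform.
algo.run()
krigingMetamodel = algo.getResult().getMetaModel()
metamodelOnOriginalScale = ot.ComposedFunction(krigingMetamodel, myTransform)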
Example no. 13
# Generate design of experiment
# -----------------------------
#
# We create training samples from the function :math:`x\sin(x)`. We can change their number and distribution in the :math:`[0; 10]` range.
# If the `with_error` boolean is `True`, then the data is computed by adding Gaussian noise to the function values.

# %%
dim = 1
xmin = 0
xmax = 10
n_pt = 20  # number of initial points
with_error = True  # whether to use generation with error

# %%
ref_func_with_error = ot.SymbolicFunction(['x', 'eps'], ['x * sin(x) + eps'])
ref_func = ot.ParametricFunction(ref_func_with_error, [1], [0.0])
x = np.vstack(np.linspace(xmin, xmax, n_pt))
ot.RandomGenerator.SetSeed(1235)
eps = ot.Normal(0, 1.5).getSample(n_pt)
X = ot.Sample(n_pt, 2)
X[:, 0] = x
X[:, 1] = eps
if with_error:
    y = np.array(ref_func_with_error(X))
else:
    y = np.array(ref_func(x))

# %%
graph = ref_func.draw(xmin, xmax, 200)
cloud = ot.Cloud(x, y)
cloud.setColor('red')
Example no. 14
criticalDomain = ot.Interval(5, float('inf'))

# %%
# We have access to the distance to this domain thanks to the
# :class:`~openturns.DistanceToDomainFunction` class.
dist2criticalDomain = ot.DistanceToDomainFunction(criticalDomain)

# %%
# We define the parameters in our function from the output sample
s = 0.1 * Y.computeStandardDeviation()[0]

# %%
# We now define our filter function by composition of the parametrized function and
# the distance function.
f = ot.SymbolicFunction(["x", "s"], ["exp(-x/s)"])
phi = ot.ParametricFunction(f, [1], [s])
filterFunction = ot.ComposedFunction(phi, dist2criticalDomain)

# %%
# We choose an unbiased estimator
estimatorType = ot.HSICUStat()

# %%
# and build the HSIC estimator
targetHSIC = ot.HSICEstimatorTargetSensitivity(covarianceModelCollection, X, Y,
                                               estimatorType, filterFunction)

# %%
# We get the R2-HSIC indices:
R2HSICIndices = targetHSIC.getR2HSICIndices()
print("\n Target HSIC analysis")
#!/usr/bin/env python

from __future__ import print_function
import openturns as ot
import openturns.testing as ott
import math as m


f = ot.SymbolicFunction(
    ['tau', 'alpha'], ['cos(4*tau)*cosh((tau-pi_)/alpha)/cosh(pi_/alpha)'])
alpha = 1.36
rho = ot.ParametricFunction(f, [1], [alpha])

cov = ot.StationaryFunctionalCovarianceModel([1.0], [1.0], rho)
print(cov)

tau = 0.1
c = cov([tau])[0, 0]
print("tau=", tau)
print("c=", c)
c_ref = m.cos(4*tau)*m.cosh((tau-m.pi)/alpha)/m.cosh(m.pi/alpha)
ott.assert_almost_equal(c, c_ref)

assert len(cov.getFullParameter()) == 3, "wrong full parameter"
assert len(cov.getFullParameterDescription()
           ) == 3, "wrong full parameter description"

print(cov.getFullParameter())
print(cov.getFullParameterDescription())

assert len(cov.getActiveParameter()) == 2, "wrong active parameter"
    quantile = beta*(-np.log1p(-p))**(1/alpha)
    return [quantile]


# %%
quantileFunction = ot.PythonFunction(3, 1, weibullQ)

# %%
# We define the parameters of the Weibull distribution and create the parametric function.

# %%
alpha = 10.0
beta = 1.0

# %%
quantile = ot.ParametricFunction(quantileFunction, [1, 2], [alpha, beta])
quantile

# %%
# In the library, the uniform distribution is by default over the :math:`[-1,1]` interval. To obtain a uniform distribution over :math:`[0,1]`, we need to set the bounds explicitly.

# %%
U = ot.Uniform(0.,1.)

# %%
# Then we generate a sample of size 1000 from the uniform distribution.

# %%
n = 1000
uniformSample = U.getSample(n)
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View


def flow(X):
    Y0 = X[0]
    Y1 = X[1]
    t = X[2]
    dY0 = Y0 * (2.0 - Y1)
    dY1 = Y1 * (Y0 - 1.0)
    return [dY0, dY1]


f = ot.PythonFunction(3, 2, flow)
phi = ot.ParametricFunction(f, [2], [0.0])
solver = ot.RungeKutta(phi)

initialState = [2.0, 2.0]
nt = 47
dt = 0.1
timeGrid = ot.RegularGrid(0.0, dt, nt)
result = solver.solve(initialState, timeGrid)
xMin = result.getMin()
xMax = result.getMax()
delta = 0.2 * (xMax - xMin)
mesh = ot.IntervalMesher([12] * 2).build(
    ot.Interval(xMin - delta, xMax + delta))
field = ot.Field(mesh, phi(mesh.getVertices()))
ot.ResourceMap.SetAsScalar("Field-ArrowScaling", 0.1)
graph = field.draw()
covarianceModelCollection.add(Cov2)

# We choose an estimator type:
#  - unbiased: HSICUStat;
#  - biased: HSICVStat.
#
estimatorType = ot.HSICUStat()

# We define a distance function for the weights
#  For the TSA, the critical domain is [5,+inf].
interval = ot.Interval(5, float('inf'))
g = ot.DistanceToDomainFunction(interval)

stdDev = Y.computeStandardDeviation()[0]
foo = ot.SymbolicFunction(["x", "s"], ["exp(-x/s)"])
g2 = ot.ParametricFunction(foo, [1], [0.1 * stdDev])

# The filter function
filterFunction = ot.ComposedFunction(g2, g)

# We eventually build the HSIC object!
TSA = ot.HSICEstimatorTargetSensitivity(covarianceModelCollection, X, Y,
                                        estimatorType, filterFunction)

# We get the R2-HSIC
R2HSIC = TSA.getR2HSICIndices()
ott.assert_almost_equal(R2HSIC, [0.26863688, 0.00468423, 0.00339962])

# and the HSIC indices
HSICIndices = TSA.getHSICIndices()
ott.assert_almost_equal(HSICIndices, [0.00107494, 0.00001868, 0.00001411])
Example no. 19
# the second output of the model is the standard deviation.

# %%
fullModel = ot.SymbolicFunction(["x", "theta1", "theta2", "theta3"],
                                ["theta1+theta2*x+theta3*x^2", "1.0"])

# %%
# To differentiate between the two classes of inputs (:math:`x` and :math:`\vect\theta`),
# we define a :class:`~openturns.ParametricFunction` from `fullModel`
# and make the first input (the observations :math:`x`) its *parameter*:
# :math:`f_x(\vect \theta) := f(x, \vect \theta)`.
# We set :math:`x = 1` as a placeholder,
# but :math:`x` will actually take the values :math:`x_i` of the observations
# when we sample :math:`\vect\theta`.

linkFunction = ot.ParametricFunction(fullModel, [0], [1.0])
print(linkFunction)

# %%
# Define the observation noise :math:`\varepsilon {\sim} \mathcal N(0, 1)` and create a sample from it.

# %%
ot.RandomGenerator.SetSeed(0)
noiseStandardDeviation = 1.0
noise = ot.Normal(0, noiseStandardDeviation)
noiseSample = noise.getSample(obsSize)

# %%
# Define the vector of observations :math:`y_i`,
# here sampled using the "true" value of :math:`\vect \theta`: :math:`\vect \theta_{true}`.
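#
# The excerpt stops before the corresponding code; a hedged sketch of that
# step, where the observation grid `x_obs` and `thetaTrue` are hypothetical
# stand-ins for values defined earlier in the full example, could read:

# %%
thetaTrue = [-4.5, 4.8, 2.2]                        # hypothetical "true" parameters
x_obs = [[-2.0 + 0.5 * i] for i in range(obsSize)]  # hypothetical observation points
y_obs = ot.Sample(obsSize, 1)
for i in range(obsSize):
    linkFunction.setParameter(x_obs[i])             # plug the i-th observation x_i
    y_obs[i, 0] = linkFunction(thetaTrue)[0] + noiseSample[i, 0]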
Example no. 20
import openturns as ot
import otrobopt
from matplotlib import pyplot as plt
from openturns.viewer import View

thetaDist = ot.Normal(2.0, 0.1)
if 'WorstCaseMeasure' == 'WorstCaseMeasure':
    thetaDist = ot.Uniform(-1.0, 4.0)
elif 'ChanceMeasure' in 'WorstCaseMeasure':
    thetaDist = ot.Normal(1.0, 1.0)

f_base = ot.SymbolicFunction(['x', 'theta'], ['x*theta'])
f = ot.ParametricFunction(f_base, [1], thetaDist.getMean())

if 'WorstCaseMeasure' == 'JointChanceMeasure':
    measure = otrobopt.JointChanceMeasure(f, thetaDist, ot.GreaterOrEqual(),
                                          0.95)
elif 'WorstCaseMeasure' == 'IndividualChanceMeasure':
    measure = otrobopt.IndividualChanceMeasure(f, thetaDist,
                                               ot.GreaterOrEqual(), [0.95])
elif 'WorstCaseMeasure' == 'MeanStandardDeviationTradeoffMeasure':
    measure = otrobopt.MeanStandardDeviationTradeoffMeasure(
        f, thetaDist, [0.8])
elif 'WorstCaseMeasure' == 'QuantileMeasure':
    measure = otrobopt.QuantileMeasure(f, thetaDist, 0.99)
else:
    measure = otrobopt.WorstCaseMeasure(f, thetaDist)

N = 10
experiment = ot.LHSExperiment(N)
factory = otrobopt.MeasureFactory(experiment)
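
# A hedged continuation sketch, following the otrobopt examples: discretize
# the measure over this design of experiments.
discretizedMeasure = factory.build(measure)
print(discretizedMeasure)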
Example no. 21
    ot.Uniform(63070.0, 115600.0),
    ot.Uniform(990.0, 1110.0),
    ot.Uniform(63.1, 116.0),
    ot.Uniform(700.0, 820.0),
    ot.Uniform(1120.0, 1680.0),
    ot.Uniform(9855.0, 12045.0)
]
distribution = ot.ComposedDistribution(coll)
distribution.setDescription(input_names)

# %%
# Freeze r, Tu, Tl in the model to speed up the study
selection = [1, 2, 4]
complement = ot.Indices(selection).complement(dimension)
distribution = distribution.getMarginal(complement)
model = ot.ParametricFunction(model, selection,
                              distribution.getMarginal(selection).getMean())
input_names_copy = list(input_names)
input_names = itemgetter(*complement)(input_names)
dimension = len(complement)

# %%
# design of experiment
size = 1000
X = distribution.getSample(size)
Y = model(X)

# %%
# create a functional chaos model
algo = ot.FunctionalChaosAlgorithm(X, Y)
algo.run()
result = algo.getResult()
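
# %%
# A short follow-up sketch showing how Sobol' indices are typically extracted
# from the functional chaos result (using the names defined above).
chaosSI = ot.FunctionalChaosSobolIndices(result)
for i in range(dimension):
    print(input_names[i],
          chaosSI.getSobolIndex(i),        # first-order index
          chaosSI.getSobolTotalIndex(i))   # total index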
Example no. 22
#! /usr/bin/env python

import openturns as ot

ot.TESTPREAMBLE()
ot.PlatformInfo.SetNumericalPrecision(5)

m = 10
x = [[0.5 + i] for i in range(m)]

inVars = ["a", "b", "c", "x"]
formulas = ["a + b * exp(c * x)", "(a * x^2 + b) / (c + x^2)"]
g = ot.SymbolicFunction(inVars, formulas)
trueParameter = [2.8, 1.2, 0.5]
params = [0, 1, 2]
model = ot.ParametricFunction(g, params, trueParameter)
y = model(x)
y += ot.Normal([0.0] * 2, [0.05] * 2, ot.IdentityMatrix(2)).getSample(m)
candidate = [1.0] * 3
methods = ["SVD", "QR", "Cholesky"]
for method in methods:
    print("method=", method)
    algo = ot.LinearLeastSquaresCalibration(model, x, y, candidate, method)
    algo.run()
    print("result=", algo.getResult())
import openturns as ot
import openturns.testing as ott
import math as m
import sys

ot.TESTPREAMBLE()
ot.PlatformInfo.SetNumericalPrecision(3)

m = 10
x = [[0.5 + i] for i in range(m)]

inVars = ["a", "b", "c", "x"]
formulas = ["a + b * exp(c * x)", "(a * x^2 + b) / (c + x^2)"]
model = ot.SymbolicFunction(inVars, formulas)
p_ref = [2.8, 1.2, 0.5]
params = [0, 1, 2]
modelX = ot.ParametricFunction(model, params, p_ref)
y = modelX(x)
y += ot.Normal([0.0] * 2, [0.05] * 2, ot.IdentityMatrix(2)).getSample(m)
candidate = [1.0] * 3
priorCovariance = ot.CovarianceMatrix(3)
for i in range(3):
    priorCovariance[i, i] = 3.0 + (1.0 + i) * (1.0 + i)
    for j in range(i):
        priorCovariance[i, j] = 1.0 / (1.0 + i + j)
errorCovariance = ot.CovarianceMatrix(2)
for i in range(2):
    errorCovariance[i, i] = 2.0 + (1.0 + i) * (1.0 + i)
    for j in range(i):
        errorCovariance[i, j] = 1.0 / (1.0 + i + j)
globalErrorCovariance = ot.CovarianceMatrix(2 * m)
for i in range(2 * m):
Example no. 24
# Then we create a kriging metamodel, using a constant trend and a squared exponential covariance model. 

# %%
basis = ot.ConstantBasisFactory(dimension).build()
covarianceModel = ot.SquaredExponential([0.1]*dimension, [1.0])
algo = ot.KrigingAlgorithm(x, y, covarianceModel, basis)
algo.run()
result = algo.getResult()
metamodel = result.getMetaModel()

# %%
# It is not so easy to visualize a bidimensional function. In order to simplify the graphics, we consider the value of the function at the input :math:`x_{1,ref}=0.5`. This amounts to creating a `ParametricFunction` where the first variable :math:`x_1` (at input index 0) is set to :math:`0.5`.

# %%
x1ref = 0.5
metamodelAtXref = ot.ParametricFunction(metamodel, [0], [x1ref])
modelAtXref = ot.ParametricFunction(model, [0], [x1ref])

# %%
# For this given value of :math:`x_1`, we plot the model and the metamodel with :math:`x_2` ranging from its 1% quantile up to its 99% quantile. We set the X title to "X2" because, by default, this axis would be labeled after the first free variable of the parametric function, whose default name is "X0".

# %%
x2min = ot.Normal().computeQuantile(0.01)[0]
x2max = ot.Normal().computeQuantile(0.99)[0]
graph = metamodelAtXref.draw(x2min, x2max)
graph.setLegends(["Kriging"])
curve = modelAtXref.draw(x2min, x2max)
curve.setLegends(["Model"])
curve.setColors(['red'])
graph.add(curve)
graph.setLegendPosition("topright")
# center of the approximation
x0 = [-0.4, -0.4]

# drawing bounds
a = -0.4
b = 0.0

# %%
# create a linear (first order) Taylor approximation
algo = ot.LinearTaylor(x0, model)
algo.run()
responseSurface = algo.getMetaModel()

# %%
# plot the 2nd output of our model, with the first input frozen at the center value
graph = ot.ParametricFunction(responseSurface, [0], [x0[1]]).getMarginal(1).draw(a, b)
graph.setLegends(['taylor'])
curve = ot.ParametricFunction(model, [0], [x0[1]]).getMarginal(1).draw(a, b).getDrawable(0)
curve.setColor('red')
curve.setLegend('model')
graph.add(curve)
graph.setLegendPosition('topright')
view = viewer.View(graph)

# %%
# Here is the decomposition at the second order:
#
# .. math::
#    \underline{y} \, \approx \, \widehat{h}(\underline{x}) \, = \,
#    h(\underline{x}_0) \, + \, \sum_{i=1}^{n_{X}} \;
#    \frac{\partial h}{\partial x_i}(\underline{x}_0).\left(x_i - x_{0,i} \right) \, + \,
#    \frac{1}{2} \; \sum_{i,j=1}^{n_X} \;
#    \frac{\partial^2 h}{\partial x_i \partial x_j}(\underline{x}_0).\left(x_i - x_{0,i} \right).\left(x_j - x_{0,j} \right)
Example no. 26
def residualFunctionNoise(params):
    modelx = ot.ParametricFunction(model, [0, 1, 2], params)
    return [modelx(x[i])[0] - ynoise[i, 0] for i in range(m)]
#!/usr/bin/env python

import openturns as ot
import math as m

f = ot.SymbolicFunction(ot.Description.BuildDefault(5, 'x'), [
    'sin(x0) + x1 * cos(x2) / exp(x3)', '-2.0 * x0 + x1 * x2^2 + cos(x3)',
    'x0 / (abs(x1) * x2^2 + x3 + 1.0)'
])
pset = [3, 1]  # x3, x1
parametersSet = True
x = [1.0, 2.0, 0.0]  # x0, x2, x4

# with reference point
referencePoint = [0.85] * 2  # x3, x1
g2 = ot.ParametricFunction(f, pset, referencePoint, parametersSet)
print('g2=', g2)
print('g2(x)=', g2(x))

# with complementary parameter set
referencePoint = [1.0, 2.0, 0.0]  # x0, x2, x4
x = [0.85] * 2  # x3, x1
parametersSet = False
g3 = ot.ParametricFunction(f, pset, referencePoint, parametersSet)
print('g3=', g3)
print('g3(x)=', g3(x))

# From ticket #1092
modelFunc = ot.SymbolicFunction(["s", "r", "c"], ["r + c"])
s = [1.]
r = 1e9
Example no. 28
        result = algo.getResult()
        x_star = result.getOptimalPoint()
        if minimization and algoName != 'STEEPEST_DESCENT':
            ott.assert_almost_equal(x_star, p_ref, 5e-2)
        print(result)

# least-squares optimization
n = 3
m = 10

x = [[0.5 + i] for i in range(m)]

model = ot.SymbolicFunction(['a', 'b', 'c', 'x'],
                            ['a + b * exp(min(500, c * x))'])
p_ref = [2.8, 1.2, 0.5]  # a, b, c
modelx = ot.ParametricFunction(model, [0, 1, 2], p_ref)
y = modelx(x)


def residualFunction_py(p):
    modelx = ot.ParametricFunction(model, [0, 1, 2], p)
    return [modelx(x[i])[0] - y[i, 0] for i in range(m)]


residualFunction = ot.PythonFunction(n, m, residualFunction_py)

bounds = ot.Interval([0, 0, 0], [2.5, 8.0, 19])
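
# A hedged sketch of how this residual function and the bounds are typically
# assembled into a least-squares problem; the loop below then attaches each
# algorithm named in `algoNames` to such a problem.
lsqProblem = ot.LeastSquaresProblem(residualFunction)
lsqProblem.setBounds(bounds)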

for algoName in algoNames:
    line_search = not (algoName in ['LEVENBERG_MARQUARDT', 'DOGLEG'])
    for bound in [True, False]:
Example no. 29
# %%
# Define the reference values of the :math:`\theta` parameter. In the Bayesian framework, this is called the mean of the *prior* normal distribution. In the data assimilation framework, this is called the *background*.

# %%
KsInitial = 20.
ZvInitial = 49.
ZmInitial = 51.
thetaPrior = [KsInitial, ZvInitial, ZmInitial]

# %%
# The following statement creates the calibrated function from the model. The calibrated parameters :math:`K_s`, :math:`Z_v`, :math:`Z_m` are at indices 1, 2, 3 in the input arguments of the model.

# %%
calibratedIndices = [1, 2, 3]
mycf = ot.ParametricFunction(g, calibratedIndices, thetaPrior)

# %%
# Calibration with linear least squares
# -------------------------------------

# %%
# The `LinearLeastSquaresCalibration` class performs the linear least squares calibration by linearizing the model in the neighbourhood of the reference point.

# %%
algo = ot.LinearLeastSquaresCalibration(mycf, Qobs, Hobs, thetaPrior, "SVD")

# %%
# The `run` method computes the solution of the problem.

# %%
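# A minimal continuation sketch (not shown in the excerpt): run the
# calibration and retrieve the estimated parameter values.
algo.run()
calibrationResult = algo.getResult()
thetaMAP = calibrationResult.getParameterMAP()
print(thetaMAP)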
Example no. 30
def residualFunction_py(p):
    modelx = ot.ParametricFunction(model, [0, 1, 2], p)
    return [modelx(x[i])[0] - y[i, 0] for i in range(m)]