def test_stationary_fun():
    # Non-regression test for
    # https://github.com/openturns/openturns/issues/1861
    ot.RandomGenerator.SetSeed(0)

    # Damped-cosine stationary correlation function rho(tau).
    correlation = ot.SymbolicFunction(
        "tau", "exp(-abs(tau))*cos(2*pi_*abs(tau))")
    cov_model = ot.StationaryFunctionalCovarianceModel([1], [1], correlation)

    # Training data: standard-normal inputs, outputs = inputs + small noise.
    inputs = ot.Normal().getSample(20)
    outputs = inputs + ot.Normal(0, 0.1).getSample(20)

    trend_basis = ot.LinearBasisFactory().build()
    kriging = ot.KrigingAlgorithm(inputs, outputs, cov_model, trend_basis)
    kriging.run()
    kriging_result = kriging.getResult()

    # The test's expectation: the conditional marginal variance evaluated
    # at the training points is (numerically) zero.
    marginal_variance = kriging_result.getConditionalMarginalVariance(inputs)
    ott.assert_almost_equal(
        marginal_variance, ot.Sample(len(inputs), 1), 1e-16, 1e-16)
# Beispiel #2
# 0
    def set_mean(self, mean):
        '''
        Construct the trend (mean) basis for the Gaussian process model.

        Parameters
        ----------
        mean : object
            Configuration object; only its ``mean_type`` attribute is read.
            Supported values are 'Linear', 'Constant', 'Quadratic' and
            'Zero' ('Zero' meaning "no trend", i.e. an empty basis).

        Raises
        ------
        ValueError
            If ``mean.mean_type`` is not one of the supported names.
            (The previous code silently stored an error *message string*
            in ``self.mean_function``, which only failed much later, when
            the supposed basis was actually used.)
        '''
        # Each supported name maps to a zero-argument builder; the
        # polynomial factories need the model's input dimension.
        builders = {
            'Linear': lambda: ot.LinearBasisFactory(self.input_dim).build(),
            'Constant': lambda: ot.ConstantBasisFactory(
                self.input_dim).build(),
            'Quadratic': lambda: ot.QuadraticBasisFactory(
                self.input_dim).build(),
            'Zero': ot.Basis,
        }
        try:
            builder = builders[mean.mean_type]
        except KeyError:
            raise ValueError(
                "This library does not support the specified mean function: "
                "%r" % (mean.mean_type,)) from None
        self.mean_function = builder()
# Beispiel #3
# 0
# Estimate a general linear model on noisy data generated from the
# identity function, using a (deliberately misspecified) Dirac
# covariance model.
sampleSize = 40
inputDimension = 1

# Create the function to estimate
model = ot.SymbolicFunction(["x0"], ["x0"])

# Regular grid of sampleSize points starting at 3.0 with step 8/sampleSize.
X = ot.Sample(sampleSize, inputDimension)
for i in range(sampleSize):
    X[i, 0] = 3.0 + (8.0 * i) / sampleSize
Y = model(X)

# Add a small noise to data: one realization of a Gaussian process with
# absolute-exponential covariance, sampled on the mesh of the design X.
Y += ot.GaussianProcess(ot.AbsoluteExponential([0.1], [0.2]),
                        ot.Mesh(X)).getRealization().getValues()

# Linear trend basis.
basis = ot.LinearBasisFactory(inputDimension).build()
# Case of a misspecified covariance model
covarianceModel = ot.DiracCovarianceModel(inputDimension)
print("===================================================\n")
algo = ot.GeneralLinearModelAlgorithm(X, Y, covarianceModel, basis)
algo.run()

result = algo.getResult()
print("\ncovariance (dirac, optimized)=", result.getCovarianceModel())
print("trend (dirac, optimized)=", result.getTrendCoefficients())
print("===================================================\n")

# Now without estimating covariance parameters
# NOTE(review): the trailing boolean presumably keeps the covariance
# parameters fixed — confirm the flag's meaning for the OT version in use.
basis = ot.LinearBasisFactory(inputDimension).build()
covarianceModel = ot.DiracCovarianceModel(inputDimension)
algo = ot.GeneralLinearModelAlgorithm(X, Y, covarianceModel, basis, True)
# Beispiel #4
# 0
from matplotlib import pyplot as plt
from openturns.viewer import View
# NOTE(review): each branch below compares ot.MaternModel's class name
# against a *different* class name, so only the final `else` is ever
# reachable; this looks like template-generated plotting code in which the
# model class is substituted — confirm before simplifying.
if ot.MaternModel().__class__.__name__ == 'ExponentialModel':
    covarianceModel = ot.ExponentialModel([0.5], [5.0])
elif ot.MaternModel().__class__.__name__ == 'GeneralizedExponential':
    covarianceModel = ot.GeneralizedExponential([2.0], [3.0], 1.5)
elif ot.MaternModel().__class__.__name__ == 'ProductCovarianceModel':
    # Product of two 1-d exponential kernels with common amplitude.
    amplitude = [1.0]
    scale1 = [4.0]
    scale2 = [4.0]
    cov1 = ot.ExponentialModel(scale1, amplitude)
    cov2 = ot.ExponentialModel(scale2, amplitude)
    covarianceModel = ot.ProductCovarianceModel([cov1, cov2])
elif ot.MaternModel().__class__.__name__ == 'RankMCovarianceModel':
    variance = [1.0, 2.0]
    basis = ot.LinearBasisFactory().build()
    covarianceModel = ot.RankMCovarianceModel(variance, basis)
else:
    # Default (and, per the note above, the only reachable) case.
    covarianceModel = ot.MaternModel()
# Truncated string representation of the model, used as the plot title.
title = str(covarianceModel)[:100]
if covarianceModel.getInputDimension() == 1:
    scale = covarianceModel.getScale()[0]
    if covarianceModel.isStationary():

        # Wrap the stationary covariance tau -> C(tau) as a 1-d function
        # so it can be drawn directly.
        def f(x):
            return [covarianceModel(x)[0, 0]]

        func = ot.PythonFunction(1, 1, f)
        func.setDescription(['$tau$', '$cov$'])
        # Plot over +/- 3 correlation lengths with 129 points.
        cov_graph = func.draw(-3.0 * scale, 3.0 * scale, 129)
        cov_graph.setTitle(title)
# Beispiel #5
# 0
# %%
# Show the trend coefficients of the previous (constant-trend) kriging fit.
# NOTE(review): `result`, `covarianceModel`, `X_train`, `Y_train`,
# `dimension` and `scaleOptimizationBounds` are defined in earlier
# notebook cells that are not part of this excerpt.
print(result.getTrendCoefficients())

# %%
# The constant trend always has only one coefficient (if there is one single output).

# %%
print(result.getCovarianceModel())

# %%
# Setting the trend
# -----------------

# %%
# Re-fit the kriging metamodel, this time with a linear trend basis.
covarianceModel.setScale(X_train.getMax())
basis = ot.LinearBasisFactory(dimension).build()
algo = ot.KrigingAlgorithm(X_train, Y_train, covarianceModel, basis)
algo.setOptimizationBounds(scaleOptimizationBounds)
algo.run()
result = algo.getResult()
krigingWithLinearTrend = result.getMetaModel()
result.getTrendCoefficients()

# %%
# The number of coefficients in the linear and quadratic trends depends on the number of inputs, which is
# equal to
#
# .. math::
#    dim = 4
#
#
    # NOTE(review): fragment of a larger test function — `sampleSize`,
    # `spatialDimension`, `model` and `X` are defined before this excerpt.
    # ot.NumericalSample is the deprecated pre-1.x alias of ot.Sample.
    X2 = ot.NumericalSample(sampleSize, spatialDimension)
    for i in range(sampleSize):
        X[i, 0] = 3.0 + i
        X2[i, 0] = 2.5 + i
    # Overwrite the first two abscissae of each design.
    X[0, 0] = 1.0
    X[1, 0] = 3.0
    X2[0, 0] = 2.0
    X2[1, 0] = 4.0
    Y = model(X)
    # Data validation
    Y2 = model(X2)
    for i in range(sampleSize):
        # Add a small noise to data
        Y[i, 0] += 0.01 * ot.DistFunc.rNormal()

    # Fit a generalized linear model: linear trend + Dirac (pure nugget)
    # covariance.
    basis = ot.LinearBasisFactory(spatialDimension).build()
    covarianceModel = ot.DiracCovarianceModel(spatialDimension)
    algo = ot.GeneralizedLinearModelAlgorithm(X, Y, covarianceModel, basis)
    algo.run()

    # perform an evaluation
    result = algo.getResult()
    metaModel = result.getMetaModel()
    conditionalCovariance = result.getCovarianceModel()
    # Residuals of the metamodel at the training points; their second
    # centered moment and the fitted covariance parameter are pinned to
    # reference values.
    residual = metaModel(X) - Y
    assert_almost_equal(residual.computeCenteredMoment(2), [0.00013144], 1e-5,
                        1e-5)
    assert_almost_equal(conditionalCovariance.getParameter(),
                        [0.011464782674211804], 1e-5, 1e-3)
    print("Test Ok")
# Evaluate the aggregated constant, linear and quadratic bases of
# dimension `dim` at the single point x = [2.0, 3.0].
dim = 2

x = [2.0 + i for i in range(dim)]

print("x=", x)

# Constant basis.
factory = ot.ConstantBasisFactory(dim)
print("factory=", factory)
basis = factory.build()
print("basis=", basis)

f = ot.AggregatedFunction(basis)
y = f(x)
print("y=", y)

# Linear basis.
factory = ot.LinearBasisFactory(dim)
print("factory=", factory)
basis = factory.build()
print("basis=", basis)

f = ot.AggregatedFunction(basis)
y = f(x)
print("y=", y)

# Quadratic basis.
factory = ot.QuadraticBasisFactory(dim)
print("factory=", factory)
basis = factory.build()
print("basis=", basis)

f = ot.AggregatedFunction(basis)
y = f(x)
# NOTE(review): the final `print("y=", y)` appears to have been lost when
# this snippet was excerpted.
# NOTE(review): `myStudy`, `model`, `formula_y0`, `persalys` and
# `openturns.testing` are defined in an earlier part of this script.
myStudy.add(model)

# Design of Experiment ##
# Fixed design: an LHS sample of the model inputs, augmented with a
# constant extra column at 0.5.
aDesign = persalys.FixedDesignOfExperiment('design', model)
validationInputSample = ot.LHSExperiment(model.getDistribution(),
                                         10).generate()
inputSample = ot.Sample(validationInputSample)
inputSample.stack(ot.Sample(10, [0.5]))
aDesign.setOriginalInputSample(inputSample)
myStudy.add(aDesign)

aDesign.run()

# Kriging ##
# Kriging metamodel with a linear trend and a 2-d Matern covariance.
analysis = persalys.KrigingAnalysis('kriging_0', aDesign)
analysis.setBasis(ot.LinearBasisFactory(2).build())
analysis.setCovarianceModel(ot.MaternModel(2))
myStudy.add(analysis)
print(analysis)

analysis.run()
# The metamodel is required to reproduce the design outputs at the
# design points (to 3e-5).
metaModel = analysis.getResult().getResultForVariable('y0').getMetaModel()
openturns.testing.assert_almost_equal(
    aDesign.getResult().getDesignOfExperiment().getOutputSample(),
    metaModel(validationInputSample), 3.0e-5, 3.0e-5)

# Design of Experiment ##
# Add a second output y1 and re-run the design for both outputs.
model.addOutput(persalys.Output('y1'))
model.setFormula('y1', formula_y0 + ' + xi3')
aDesign.setInterestVariables(['y0', 'y1'])
aDesign.run()
# Beispiel #9
# 0
# Data model read from data.csv: columns 0, 2, 3 are the inputs
# ('x_0', 'x_2', 'x_3'), column 1 is the output ('x_1').
# NOTE(review): `myStudy` and `model1` come from earlier in the script.
model3 = persalys.DataModel('model3', 'data.csv', [0, 2, 3], [1],
                            ['x_0', 'x_2', 'x_3'], ['x_1'])
myStudy.add(model3)

# Design of Experiment ##

# 20-point Monte-Carlo probabilistic design on model1.
probaDesign = persalys.ProbabilisticDesignOfExperiment('probaDesign', model1,
                                                       20, "MONTE_CARLO")
probaDesign.run()
myStudy.add(probaDesign)

# 1- meta model1 ##

# 1-a Kriging ##
# Kriging (linear trend + 2-d Matern), validated both with a held-out
# test sample and by K-fold cross-validation, for outputs y0 and y1.
kriging = persalys.KrigingAnalysis('kriging', probaDesign)
kriging.setBasis(ot.LinearBasisFactory(2).build())
kriging.setCovarianceModel(ot.MaternModel(2))
kriging.setTestSampleValidation(True)
kriging.setKFoldValidation(True)
kriging.setInterestVariables(['y0', 'y1'])
myStudy.add(kriging)

# 1-b Chaos ##
# Sparse degree-7 functional chaos on the same design, for y1 only,
# with the same two validation schemes.
chaos1 = persalys.FunctionalChaosAnalysis('chaos_1', probaDesign)
chaos1.setChaosDegree(7)
chaos1.setSparseChaos(True)
chaos1.setTestSampleValidation(True)
chaos1.setKFoldValidation(True)
chaos1.setInterestVariables(['y1'])
myStudy.add(chaos1)