def computeValues(Estimatortype, OutputCov, SA, weightf=None):
    """Initialization (relies on the module-level globals C, inputSample,
    outputSample, kronCov and x_covariance_collection)."""
    if weightf == "Exp":
        weightFunction = HSICSAWeightFunctions.HSICSAExponentialWeightFunction(
            C,
            # Note: the expected arguments vary with the package version.
            [0.5, outputSample.computeStandardDeviation()[0]])
    elif weightf == "Ind":
        weightFunction = HSICSAWeightFunctions.HSICSAStepWeightFunction(C)

    if SA in ("GSA", "CSA"):  # 'SA == "GSA" or "CSA"' would always be truthy
        y_covariance = ot.SquaredExponential()
        # Gaussian kernel parameterization
        y_covariance.setScale([outputSample.computeStandardDeviation()[0]])

    if SA == "TSA":
        if OutputCov == "Exp":
            y_covariance = ot.SquaredExponential()
            yw = weightFunction.function(outputSample)
            y_covariance.setScale([np.std(yw, ddof=1)
                                   ])  # Gaussian kernel parameterization
        elif OutputCov == "Kron":
            y_covariance = kronCov

    CovarianceList = [x_covariance_collection, y_covariance]

    if SA == "GSA":
        Estimator = HSICEstimators.GSAHSICEstimator(CovarianceList,
                                                    inputSample, outputSample,
                                                    Estimatortype)

    if SA == "TSA":
        Estimator = HSICEstimators.TSAHSICEstimator(CovarianceList,
                                                    inputSample, outputSample,
                                                    weightFunction,
                                                    Estimatortype)

    if SA == "CSA":
        Estimator = HSICEstimators.CSAHSICEstimator(CovarianceList,
                                                    inputSample, outputSample,
                                                    weightFunction,
                                                    Estimatortype)
    """Testing"""

    print('R2-HSIC : ', Estimator.getR2HSICIndices())
    print('\n')
    print('HSIC : ', Estimator.getHSICIndices())
    print('\n')
    if SA != 'CSA':
        print('p-valeurs asymptotiques : ', Estimator.getPValuesAsymptotic())
        print('\n')
    Estimator.setPermutationBootstrapSize(1000)
    print('p-valeurs permutation : ', Estimator.getPValuesPermutation())
    print('\n')
    def test_SquaredExponential(self):
        # With 2 principal components
        setup_HDRenv()
        # Test with no outlier in the band
        xmin = 0.0
        step = 0.1
        n = 100
        timeGrid = ot.RegularGrid(xmin, step, n + 1)
        amplitude = [7.0]
        scale = [1.5]
        covarianceModel = ot.SquaredExponential(scale, amplitude)
        process = ot.GaussianProcess(covarianceModel, timeGrid)
        nbTrajectories = 50
        processSample = process.getSample(nbTrajectories)
        # KL decomposition
        reduction = othdr.KarhunenLoeveDimensionReductionAlgorithm(
            processSample, 2)
        reduction.run()
        reducedComponents = reduction.getReducedComponents()

        # Distribution fit in reduced space
        ks = ot.KernelSmoothing()
        reducedDistribution = ks.build(reducedComponents)
        hdr = othdr.ProcessHighDensityRegionAlgorithm(processSample,
                                                      reducedComponents,
                                                      reducedDistribution,
                                                      [0.95, 0.5])
        hdr.run()
        graph = hdr.draw()
        otv.View(graph)
Example #3
 def test_NonZeroMean(self):
     # Create the KL result
     numberOfVertices = 10
     interval = ot.Interval(-1.0, 1.0)
     mesh = ot.IntervalMesher([numberOfVertices - 1]).build(interval)
     covariance = ot.SquaredExponential()
     zeroProcess = ot.GaussianProcess(covariance, mesh)
     # Define a trend function
     f = ot.SymbolicFunction(["t"], ["30 * t"])
     fTrend = ot.TrendTransform(f, mesh)
     # Add it to the process
     process = ot.CompositeProcess(fTrend, zeroProcess)
     # Sample
     sampleSize = 100
     processSample = process.getSample(sampleSize)
     threshold = 0.0
     algo = ot.KarhunenLoeveSVDAlgorithm(processSample, threshold)
     algo.run()
     klresult = algo.getResult()
     # Create the KL reduction
     meanField = processSample.computeMean()
     klreduce = ot.KarhunenLoeveReduction(klresult)
     # Generate a trajectory and reduce it
     field = process.getRealization()
     values = field.getValues()
     reducedValues = klreduce(values)
     ott.assert_almost_equal(values, reducedValues)
Example #4
    def _buildKrigingAlgo(self, inputSample, outputSample):
        """
        Build the kriging algorithm without running it.
        """
        if self._basis is None:
            # create linear basis only for the defect parameter (1st parameter),
            # constant otherwise
            input = ['x' + str(i) for i in range(self._dim)]
            functions = []
            # constant
            functions.append(ot.SymbolicFunction(input, ['1']))
            # linear for the first parameter only
            functions.append(ot.SymbolicFunction(input, [input[0]]))
            self._basis = ot.Basis(functions)

        if self._covarianceModel is None:
            # anisotropic squared exponential covariance model
            self._covarianceModel = ot.SquaredExponential([1] * self._dim)

        # normalization
        mean = inputSample.computeMean()
        try:
            stddev = inputSample.computeStandardDeviation()
        except AttributeError:
            stddev = inputSample.computeStandardDeviationPerComponent()
        linear = ot.SquareMatrix(self._dim)
        for j in range(self._dim):
            linear[j, j] = 1.0 / stddev[j] if abs(stddev[j]) > 1e-12 else 1.0
        zero = [0.0] * self._dim
        transformation = ot.LinearFunction(mean, zero, linear)

        algoKriging = ot.KrigingAlgorithm(transformation(inputSample),
                                          outputSample, self._covarianceModel,
                                          self._basis)
        return algoKriging, transformation
def test_two_outputs():
    f = ot.SymbolicFunction(['x'], ['x * sin(x)', 'x * cos(x)'])
    sampleX = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]
    sampleY = f(sampleX)
    basis = ot.Basis([
        ot.SymbolicFunction(['x'], ['x']),
        ot.SymbolicFunction(['x'], ['x^2'])
    ])
    covarianceModel = ot.SquaredExponential([1.0])
    covarianceModel.setActiveParameter([])
    covarianceModel = ot.TensorizedCovarianceModel([covarianceModel] * 2)
    algo = ot.KrigingAlgorithm(sampleX, sampleY, covarianceModel, basis)
    algo.run()
    result = algo.getResult()
    mm = result.getMetaModel()
    assert mm.getOutputDimension() == 2, "wrong output dim"
    ott.assert_almost_equal(mm(sampleX), sampleY)
    # Check the conditional covariance
    reference_covariance = ot.Matrix([[4.4527, 0.0, 8.34404, 0.0],
                                      [0.0, 2.8883, 0.0, 5.41246],
                                      [8.34404, 0.0, 15.7824, 0.0],
                                      [0.0, 5.41246, 0.0, 10.2375]])
    ott.assert_almost_equal(
        result([[9.5], [10.0]]).getCovariance() - reference_covariance,
        ot.Matrix(4, 4), 0.0, 2e-2)
def test_two_inputs_one_output():
    # Kriging use case
    inputDimension = 2

    # Learning data
    levels = [8, 5]
    box = ot.Box(levels)
    inputSample = box.generate()
    # Scale each direction
    inputSample *= 10.0

    model = ot.SymbolicFunction(['x', 'y'], ['cos(0.5*x) + sin(y)'])
    outputSample = model(inputSample)

    # Validation
    sampleSize = 10
    inputValidSample = ot.ComposedDistribution(
        2 * [ot.Uniform(0, 10.0)]).getSample(sampleSize)
    outputValidSample = model(inputValidSample)

    # 2) Definition of exponential model
    # The parameters have been calibrated using TNC optimization
    # and AbsoluteExponential models
    scales = [5.33532, 2.61534]
    amplitude = [1.61536]
    covarianceModel = ot.SquaredExponential(scales, amplitude)

    # 3) Basis definition
    basis = ot.ConstantBasisFactory(inputDimension).build()

    # 4) Kriging algorithm
    algo = ot.KrigingAlgorithm(inputSample, outputSample, covarianceModel,
                               basis)
    algo.run()

    result = algo.getResult()
    # Get meta model
    metaModel = result.getMetaModel()
    outData = metaModel(inputValidSample)

    # 5) Errors
    # Interpolation
    ott.assert_almost_equal(outputSample, metaModel(inputSample), 3.0e-5,
                            3.0e-5)

    # 6) Kriging variance is 0 on learning points
    covariance = result.getConditionalCovariance(inputSample)
    ott.assert_almost_equal(covariance, ot.SquareMatrix(len(inputSample)),
                            7e-7, 7e-7)

    # Covariance per marginal & extract variance component
    coll = result.getConditionalMarginalCovariance(inputSample)
    var = [mat[0, 0] for mat in coll]
    ott.assert_almost_equal(var, [0] * len(var), 0.0, 1e-13)

    # Variance per marginal
    var = result.getConditionalMarginalVariance(inputSample)
    ott.assert_almost_equal(var, ot.Point(len(inputSample)), 0.0, 1e-13)
    # Estimation
    ott.assert_almost_equal(outputValidSample, outData, 1.e-1, 1e-1)
 def test_KarhunenLoeveValidationMultidimensional(self):
     # Create the KL result
     numberOfVertices = 20
     interval = ot.Interval(-1.0, 1.0)
     mesh = ot.IntervalMesher([numberOfVertices - 1]).build(interval)
     outputDimension = 2
     univariateCovariance = ot.SquaredExponential()
     covarianceCollection = [univariateCovariance] * outputDimension
     multivariateCovariance = ot.TensorizedCovarianceModel(
         covarianceCollection)
     process = ot.GaussianProcess(multivariateCovariance, mesh)
     sampleSize = 10
     processSample = process.getSample(sampleSize)
     threshold = 1.0e-7
     algo = ot.KarhunenLoeveSVDAlgorithm(processSample, threshold)
     algo.run()
     klresult = algo.getResult()
     # Create the validation
     validation = ot.KarhunenLoeveValidation(processSample, klresult)
     # Check residuals
     residualProcessSample = validation.computeResidual()
     assert (type(residualProcessSample) is ot.ProcessSample)
     # Check standard deviation
     residualSigmaField = validation.computeResidualStandardDeviation()
     zeroSample = ot.Sample(numberOfVertices, outputDimension)
     ott.assert_almost_equal(residualSigmaField, zeroSample)
     # Check graph
     graph = validation.drawValidation()
     if False:
         from openturns.viewer import View
         View(graph).save('validation2.png')
Example #8
 def get_kernel_function(self, kernel, name):
     '''
     kernel : dictionary of parameters
     name : name of the kernel
     '''
     unsupported = ('Not sure whether this library supports '
                    'the specified kernel type')
     if self.input_dim == 1:
         if name == 'Matern':
             return ot.MaternModel([float(kernel[name]['lengthscale'])],
                                   [kernel[name]['scale']],
                                   float(kernel[name]['order']))
         elif name == 'RBF':
             return ot.SquaredExponential(
                 [float(kernel[name]['lengthscale'])],
                 [kernel[name]['scale']])
         elif name in ('White', 'Const', 'RatQd'):
             return unsupported
     else:
         if name == 'Matern':
             return ot.MaternModel(kernel[name]['lengthscale'],
                                   [kernel[name]['scale']],
                                   float(kernel[name]['order']))
         elif name == 'RBF':
             return ot.SquaredExponential(kernel[name]['lengthscale'],
                                          [kernel[name]['scale']])
         elif name in ('White', 'Const', 'RatQd'):
             return unsupported
def test_two_outputs():
    f = ot.SymbolicFunction(['x'], ['x * sin(x)', 'x * cos(x)'])
    sampleX = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]
    sampleY = f(sampleX)
    basis = ot.Basis([ot.SymbolicFunction(['x'], ['x']),
                      ot.SymbolicFunction(['x'], ['x^2'])])
    covarianceModel = ot.SquaredExponential([1.0])
    covarianceModel.setActiveParameter([])
    algo = ot.KrigingAlgorithm(sampleX, sampleY, covarianceModel, basis)
    algo.run()
    result = algo.getResult()
    mm = result.getMetaModel()
    assert mm.getOutputDimension() == 2, "wrong output dim"
    ott.assert_almost_equal(mm(sampleX), sampleY)
Example #10
    def _buildKrigingAlgo(self, inputSample, outputSample):
        """
        Build the kriging algorithm without running it.
        """
        if self._basis is None:
            # create linear basis only for the defect parameter (1st parameter),
            # constant otherwise
            input = ['x' + str(i) for i in range(self._dim)]
            functions = []
            # constant
            functions.append(ot.NumericalMathFunction(input, ['y'], ['1']))
            # linear for the first parameter only
            functions.append(ot.NumericalMathFunction(input, ['y'],
                                                      [input[0]]))
            self._basis = ot.Basis(functions)

        if self._covarianceModel is None:
            # anisotropic squared exponential covariance model
            covColl = ot.CovarianceModelCollection(self._dim)
            for i in range(self._dim):
                if LooseVersion(ot.__version__) == '1.6':
                    covColl[i] = ot.SquaredExponential(1, 1.)
                elif LooseVersion(ot.__version__) > '1.6':
                    covColl[i] = ot.SquaredExponential([1], [1.])
            self._covarianceModel = ot.ProductCovarianceModel(covColl)

        if LooseVersion(ot.__version__) == "1.9":
            algoKriging = ot.KrigingAlgorithm(inputSample, outputSample,
                                              self._covarianceModel,
                                              self._basis)
        else:
            algoKriging = ot.KrigingAlgorithm(inputSample, outputSample,
                                              self._basis,
                                              self._covarianceModel, True)
        algoKriging.run()
        return algoKriging
def test_one_input_one_output():
    sampleSize = 6
    dimension = 1

    f = ot.SymbolicFunction(['x0'], ['x0 * sin(x0)'])

    X = ot.Sample(sampleSize, dimension)
    X2 = ot.Sample(sampleSize, dimension)
    for i in range(sampleSize):
        X[i, 0] = 3.0 + i
        X2[i, 0] = 2.5 + i
    X[0, 0] = 1.0
    X[1, 0] = 3.0
    X2[0, 0] = 2.0
    X2[1, 0] = 4.0
    Y = f(X)
    Y2 = f(X2)

    # create algorithm
    basis = ot.ConstantBasisFactory(dimension).build()
    covarianceModel = ot.SquaredExponential([1e-02], [4.50736])

    algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis)
    algo.run()

    # perform an evaluation
    result = algo.getResult()

    ott.assert_almost_equal(result.getMetaModel()(X), Y)
    ott.assert_almost_equal(result.getResiduals(), [1.32804e-07], 1e-3, 1e-3)
    ott.assert_almost_equal(result.getRelativeErrors(), [5.20873e-21])

    # Kriging variance is 0 on learning points
    covariance = result.getConditionalCovariance(X)
    covariancePoint = ot.Point(covariance.getImplementation())
    theoricalVariance = ot.Point(sampleSize * sampleSize)
    ott.assert_almost_equal(covariance,
                            ot.Matrix(sampleSize, sampleSize),
                            8.95e-7, 8.95e-7)

    # Covariance per marginal & extract variance component
    coll = result.getConditionalMarginalCovariance(X)
    var = [mat[0, 0] for mat in coll]
    ott.assert_almost_equal(var, [0]*sampleSize, 1e-14, 1e-14)

    # Variance per marginal
    var = result.getConditionalMarginalVariance(X)
    ott.assert_almost_equal(var, ot.Point(sampleSize), 1e-14, 1e-14)
 def test_KarhunenLoeveValidation(self):
     # Create the KL result
     numberOfVertices = 20
     interval = ot.Interval(-1.0, 1.0)
     mesh = ot.IntervalMesher([numberOfVertices - 1]).build(interval)
     covariance = ot.SquaredExponential()
     process = ot.GaussianProcess(covariance, mesh)
     sampleSize = 100
     processSample = process.getSample(sampleSize)
     threshold = 1.0e-7
     algo = ot.KarhunenLoeveSVDAlgorithm(processSample, threshold)
     algo.run()
     klresult = algo.getResult()
     # Create validation
     validation = ot.KarhunenLoeveValidation(processSample, klresult)
     # Check residuals
     residualProcessSample = validation.computeResidual()
     assert (type(residualProcessSample) is ot.ProcessSample)
     # Check standard deviation
     residualSigmaField = validation.computeResidualStandardDeviation()
     exact = ot.Sample(numberOfVertices, 1)
     #ott.assert_almost_equal(residualSigmaField, exact)
     # Check mean
     residualMean = validation.computeResidualMean()
     exact = ot.Sample(numberOfVertices, 1)
     #ott.assert_almost_equal(residualMean, exact)
     # Check graph
     graph0 = validation.drawValidation()
     graph1 = residualProcessSample.drawMarginal(0)
     graph2 = residualMean.drawMarginal(0)
     graph3 = residualSigmaField.drawMarginal(0)
     graph4 = validation.drawObservationWeight(0)
     graph5 = validation.drawObservationQuality()
     if 0:
         from openturns.viewer import View
         View(graph0).save('validation1.png')
         View(graph1).save('validation1-residual.png')
         View(graph2).save('validation1-residual-mean.png')
         View(graph3).save('validation1-residual-stddev.png')
         View(graph4).save('validation1-indiv-weight.png')
         View(graph5).save('validation1-indiv-quality.png')
Example #13
 def test_ZeroMean(self):
     # Create the KL result
     numberOfVertices = 10
     interval = ot.Interval(-1.0, 1.0)
     mesh = ot.IntervalMesher([numberOfVertices - 1]).build(interval)
     covariance = ot.SquaredExponential()
     process = ot.GaussianProcess(covariance, mesh)
     sampleSize = 10
     processSample = process.getSample(sampleSize)
     threshold = 0.0
     algo = ot.KarhunenLoeveSVDAlgorithm(processSample, threshold)
     algo.run()
     klresult = algo.getResult()
     # Create the KL reduction
     meanField = processSample.computeMean()
     klreduce = ot.KarhunenLoeveReduction(klresult)
     # Generate a trajectory and reduce it
     field = process.getRealization()
     values = field.getValues()
     reducedValues = klreduce(values)
     ott.assert_almost_equal(values, reducedValues)
Example #14
 def test_trend(self):
     N = 100
     M = 1000
     P = 10
     mean = ot.SymbolicFunction("x", "sign(x)")
     cov = ot.SquaredExponential([1.0], [0.1])
     mesh = ot.IntervalMesher([N]).build(ot.Interval(-2.0, 2.0))
     process = ot.GaussianProcess(ot.TrendTransform(mean, mesh), cov, mesh)
     sample = process.getSample(M)
     algo = ot.KarhunenLoeveSVDAlgorithm(sample, 1e-6)
     algo.run()
     result = algo.getResult()
     trend = ot.TrendTransform(
         ot.P1LagrangeEvaluation(sample.computeMean()), mesh)
     sample2 = process.getSample(P)
     sample2.setName('reduction of sign(x) w/o trend')
     reduced1 = ot.KarhunenLoeveReduction(result)(sample2)
     reduced2 = ot.KarhunenLoeveReduction(result, trend)(sample2)
     g = sample2.drawMarginal(0)
     g.setColors(["red"])
     g1 = reduced1.drawMarginal(0)
     g1.setColors(["blue"])
     drs = g1.getDrawables()
     for i, d in enumerate(drs):
         d.setLineStyle("dashed")
         drs[i] = d
     g1.setDrawables(drs)
     g.add(g1)
     g2 = reduced2.drawMarginal(0)
     g2.setColors(["green"])
     drs = g2.getDrawables()
     for i, d in enumerate(drs):
         d.setLineStyle("dotted")
         drs[i] = d
     g2.setDrawables(drs)
     g.add(g2)
     if 0:
         from openturns.viewer import View
         View(g).save('reduction.png')
Example #15
# Setting the covariance models
# -----------------------------
#
# The HSIC algorithms use reproducing kernels defined on Hilbert spaces to estimate independence.
# For each input variable we choose a covariance kernel.
# Here we choose a :class:`~openturns.SquaredExponential`
# kernel for all input variables.
#
# The kernels are stored in a list of :math:`d+1` covariance kernels, where :math:`d` is the number of
# input variables; the remaining kernel is associated with the output variable.
covarianceModelCollection = []

# %%
for i in range(3):
    Xi = X.getMarginal(i)
    inputCovariance = ot.SquaredExponential(1)
    inputCovariance.setScale(Xi.computeStandardDeviation())
    covarianceModelCollection.append(inputCovariance)

# %%
# Likewise we define a covariance kernel associated to the output variable.
outputCovariance = ot.SquaredExponential(1)
outputCovariance.setScale(Y.computeStandardDeviation())
covarianceModelCollection.append(outputCovariance)

# %%
# The Global HSIC estimator
# -------------------------
#
# In this paragraph, we perform the analysis on the raw data: that is
# the global HSIC estimator.
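
# %%
# A minimal sketch of that estimator, assuming the `X`, `Y` and
# `covarianceModelCollection` objects built above (the `HSICUStat`
# statistic is an illustrative choice):
estimatorType = ot.HSICUStat()
globalHSIC = ot.HSICEstimatorGlobalSensitivity(
    covarianceModelCollection, X, Y, estimatorType)
print("R2-HSIC indices:", globalHSIC.getR2HSICIndices())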
    # Kriging use case
    spatialDimension = 2

    # Learning data
    levels = [8, 5]
    box = ot.Box(levels)
    inputSample = box.generate()
    # Scale each direction
    inputSample *= 10

    # Define model
    model = ot.SymbolicFunction(['x', 'y'], ['cos(0.5*x) + sin(y)'])
    outputSample = model(inputSample)

    # 2) Definition of exponential model
    covarianceModel = ot.SquaredExponential([1.988, 0.924], [3.153])

    # 3) Basis definition
    basisCollection = ot.BasisCollection(
        1,
        ot.ConstantBasisFactory(spatialDimension).build())

    # Kriging algorithm
    algo = ot.KrigingAlgorithm(inputSample, outputSample, covarianceModel,
                               basisCollection)
    algo.run()
    result = algo.getResult()

    vertices = [[1.0, 0.0], [2.0, 0.0], [2.0, 1.0], [1.0, 1.0], [1.5, 0.5]]
    simplicies = [[0, 1, 4], [1, 2, 4], [2, 3, 4], [3, 0, 4]]
Example #17
# In this example we are going to assess a Karhunen-Loeve decomposition
#

# %%
from __future__ import print_function
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt
ot.Log.Show(ot.Log.NONE)

# %%
# Create a Gaussian process
numberOfVertices = 20
interval = ot.Interval(-1.0, 1.0)
mesh = ot.IntervalMesher([numberOfVertices - 1]).build(interval)
covariance = ot.SquaredExponential()
process = ot.GaussianProcess(covariance, mesh)

# %%
# decompose it using KL-SVD
sampleSize = 100
processSample = process.getSample(sampleSize)
threshold = 1.0e-7
algo = ot.KarhunenLoeveSVDAlgorithm(processSample, threshold)
algo.run()
klresult = algo.getResult()

# %%
# Instantiate the validation service
validation = ot.KarhunenLoeveValidation(processSample, klresult)
model = ot.ComposedFunction(branin, transfo)

# problem
problem = ot.OptimizationProblem()
problem.setObjective(model)
bounds = ot.Interval([0.0] * dim, [1.0] * dim)
problem.setBounds(bounds)

# design
experiment = ot.Box([1, 1])
inputSample = experiment.generate()
modelEval = model(inputSample)
outputSample = modelEval.getMarginal(0)

# first kriging model
covarianceModel = ot.SquaredExponential([0.3007, 0.2483], [0.981959])
basis = ot.ConstantBasisFactory(dim).build()
kriging = ot.KrigingAlgorithm(inputSample, outputSample, covarianceModel,
                              basis)
noise = list(map(lambda x: x[1], modelEval))
kriging.setNoise(noise)
kriging.run()

# algo
algo = ot.EfficientGlobalOptimization(problem, kriging.getResult())
algo.setNoiseModel(ot.SymbolicFunction(['x1', 'x2'],
                                       ['0.96']))  # assume constant noise var
algo.setMaximumIterationNumber(20)
algo.setImprovementFactor(
    0.05)  # stop when improvement is < 5% of the current optimum
algo.setAEITradeoff(0.66744898)
Example #19
            currentValue = ot.Point(localCovariance.getImplementation())
            for j in range(currentValue.getSize()):
                gradfd[i, j] = (currentValue[j] - centralValue[j]) / eps
    print('dCov (FD)=', repr(gradfd))

    if test_grad:
        pGrad = myModel.parameterGradient(x1, x2)
        precision = ot.PlatformInfo.GetNumericalPrecision()
        ot.PlatformInfo.SetNumericalPrecision(4)
        print('dCov/dP=', pGrad)
        ot.PlatformInfo.SetNumericalPrecision(precision)


inputDimension = 2

myDefautModel = ot.SquaredExponential([2.0], [3.0])
print('myDefautModel = ', myDefautModel)
test_model(myDefautModel)

myModel = ot.SquaredExponential([2.0] * inputDimension, [3.0])
test_model(myModel)

myDefautModel = ot.GeneralizedExponential([2.0], [3.0], 1.5)
print('myDefautModel = ', myDefautModel)
test_model(myDefautModel)

myModel = ot.GeneralizedExponential([2.0] * inputDimension, [3.0], 1.5)
test_model(myModel)

myDefautModel = ot.AbsoluteExponential([2.0], [3.0])
print('myDefautModel = ', myDefautModel)
Example #20
    # Gradient testing
    eps = 1e-5

    grad = myModel.partialGradient([x1], [x2])[0, 0]

    x1_g = x1 + eps
    x1_d = x1 - eps
    gradfd = (myModel.computeAsScalar(x1_g, x2) -
              myModel.computeAsScalar(x1_d, x2)) / (2.0 * eps)
    ott.assert_almost_equal(gradfd, grad, 1e-5, 1e-5)


inputDimension = 2

# 1) SquaredExponential
myModel = ot.SquaredExponential([2.0], [3.0])
ott.assert_almost_equal(myModel.getScale(), [2], 0, 0)
ott.assert_almost_equal(myModel.getAmplitude(), [3], 0, 0)
test_model(myModel)

myModel = ot.SquaredExponential([2.0] * inputDimension, [3.0])
ott.assert_almost_equal(myModel.getScale(), [2, 2], 0, 0)
ott.assert_almost_equal(myModel.getAmplitude(), [3], 0, 0)
test_model(myModel)

# 2) GeneralizedExponential
myModel = ot.GeneralizedExponential([2.0], [3.0], 1.5)
ott.assert_almost_equal(myModel.getScale(), [2], 0, 0)
ott.assert_almost_equal(myModel.getAmplitude(), [3], 0, 0)
ott.assert_almost_equal(myModel.getP(), 1.5, 0, 0)
test_model(myModel)
Example #21
# %%
# We generate a simple Monte-Carlo input sample and evaluate the corresponding output sample.

# %%
distribution = ot.Normal(dimension)
samplesize = 15
x = distribution.getSample(samplesize)
y = model(x)

# %%
# Then we create a kriging metamodel, using a constant trend and a squared exponential covariance model. 

# %%
basis = ot.ConstantBasisFactory(dimension).build()
covarianceModel = ot.SquaredExponential([0.1]*dimension, [1.0])
algo = ot.KrigingAlgorithm(x, y, covarianceModel, basis)
algo.run()
result = algo.getResult()
metamodel = result.getMetaModel()

# %%
# It is not so easy to visualize a bidimensional function. In order to simplify the graphics, we consider the value of the function at the input :math:`x_{1,ref}=0.5`. This amounts to creating a `ParametricFunction` where the first variable :math:`x_1` (at input index 0) is set to :math:`0.5`.

# %%
x1ref = 0.5
metamodelAtXref = ot.ParametricFunction(metamodel, [0], [x1ref])
modelAtXref = ot.ParametricFunction(model, [0], [x1ref])

# %%
# For this given value of :math:`x_1`, we plot the model and the metamodel with :math:`x_2` ranging from its 1% up to its 99% quantile. We set the X title to "X2" because the default setting would label this axis with the parametric function's first input, whose default name is "X0".
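
# %%
# A minimal sketch of that comparison plot, assuming the `distribution`
# object defined above (the quantile levels, legends and titles are
# illustrative):
x2min = distribution.getMarginal(1).computeQuantile(0.01)[0]
x2max = distribution.getMarginal(1).computeQuantile(0.99)[0]
graph = metamodelAtXref.draw(x2min, x2max)
graph.add(modelAtXref.draw(x2min, x2max))
graph.setLegends(["Kriging", "Model"])
graph.setXTitle("X2")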
Example #22
# %%
import openturns as ot
from openturns.viewer import View

# %%
# First build a process to generate the input data.
# We assemble a 4-d process from functional and Gaussian processes.
T = 3.0
NT = 32
tg = ot.RegularGrid(0.0, T / NT, NT)
f1 = ot.SymbolicFunction(['t'], ['sin(t)'])
f2 = ot.SymbolicFunction(['t'], ['cos(t)^2'])
coeff1_dist = ot.Normal([1.0] * 2, [0.6] * 2, ot.CorrelationMatrix(2))
p1 = ot.FunctionalBasisProcess(coeff1_dist, ot.Basis([f1, f2]), tg)
p2 = ot.GaussianProcess(ot.SquaredExponential([1.0], [T / 4.0]), tg)
coeff3_dist = ot.ComposedDistribution([ot.Uniform(), ot.Normal()])
f1 = ot.SymbolicFunction(["t"], ["1", "0"])
f2 = ot.SymbolicFunction(["t"], ["0", "1"])
p3 = ot.FunctionalBasisProcess(coeff3_dist, ot.Basis([f1, f2]))
X = ot.AggregatedProcess([p1, p2, p3])
X.setMesh(tg)

# %%
# Draw some input trajectories from our process
ot.RandomGenerator.SetSeed(0)
x = X.getSample(10)
graph = x.drawMarginal(0)
graph.setTitle(f'{x.getSize()} input trajectories')
_ = View(graph)
Example #23
# Kriging algorithm
algo = ot.KrigingAlgorithm(inputSample, outputSample, covarianceModel, basis)
start = [50.0] * inputDimension
loglikelihood = algo.getReducedLogLikelihoodFunction()(start)
algo.setOptimizeParameters(False)
algo.run()
result = algo.getResult()
metaModel = result.getMetaModel()
variance = result.getConditionalMarginalVariance(inputSample)
ott.assert_almost_equal(variance, ot.Sample(
    inputSample.getSize(), 1), 1e-14, 1e-14)


# Consistency check: does the reimplementation fit the SquaredExponential class?
squaredExponential = ot.SquaredExponential(inputDimension)
squaredExponential.setParameter([6.0, 2.0, 1.5])
algoSE = ot.KrigingAlgorithm(
    inputSample, outputSample, squaredExponential, basis)
loglikelihoodSE = algoSE.getReducedLogLikelihoodFunction()(start)
ott.assert_almost_equal(loglikelihood, loglikelihoodSE, 1e-8, 1e-8)

# High level consistency check: does the prediction fit too?
algoSE.setOptimizeParameters(False)
algoSE.run()
resultSE = algoSE.getResult()
metaModelSE = resultSE.getMetaModel()
ott.assert_almost_equal(metaModel(inputValidSample),
                        metaModelSE(inputValidSample), 1e-8, 1e-8)

# Validate the metamodel
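
# A hypothetical validation step, assuming an OpenTURNS version in which
# MetaModelValidation takes (inputSample, outputSample, metamodel), and
# assuming the validation samples defined earlier in this example:
val = ot.MetaModelValidation(inputValidSample, outputValidSample, metaModelSE)
print('Q2 =', val.computePredictivityFactor())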
# .. math::
#    C_{\theta}(s,t) = \sigma^2 e^{ - \frac{(t-s)^2}{2\rho^2} }
#
# where :math:`\theta = (\sigma, \rho)` is the vector of hyperparameters. The nature of the covariance
# model is fixed, but its parameters are calibrated during the kriging process: we want to choose them
# so as to best fit our data.
#
# Eventually the kriging (meta)model :math:`\hat{Y}(x)` reads as
#
# .. math::
#    \hat{Y}(x) = m(x) + Z(x)
#
# where :math:`m(.)` is the trend and :math:`Z(.)` is a Gaussian process with zero mean and covariance matrix :math:`C_{\theta}(s,t)`. The trend is deterministic and the Gaussian process is probabilistic, but both contribute to the metamodel.
# A special feature of kriging is the interpolation property: the metamodel is exact at the
# training data.
covarianceModel = ot.SquaredExponential([1.], [1.0])

# %%
# We define our exact model with a `SymbolicFunction`:
model = ot.SymbolicFunction(['x'], ['x*sin(0.5*x)'])

# %%
# We use the following sample to train our metamodel:
nTrain = 5
Xtrain = ot.Sample([[0.5], [1.3], [2.4], [5.6], [8.9]])

# %%
# The values of the exact model are also needed for training:
Ytrain = model(Xtrain)

# %%
X3 = ot.Uniform(-m.pi, m.pi)
distX = ot.ComposedDistribution([X1, X2, X3])

# Input sample of size 100 and dimension 3
size = 100
X = distX.getSample(size)

# The Ishigami model
modelIshigami = ot.SymbolicFunction(
    ["X1", "X2", "X3"], ["sin(X1) + 5.0 * (sin(X2))^2 + 0.1 * X3^4 * sin(X1)"])

# Output
Y = modelIshigami(X)

# Using the same covariance model for each marginal
Cov1 = ot.SquaredExponential(1)

# Output covariance model
Cov2 = ot.SquaredExponential(1)

# Set output covariance scale
Cov2.setScale(Y.computeStandardDeviation())

# This is the GSA-type estimator: weight is 1.
W = ot.SquareMatrix(size)
for i in range(size):
    W[i, i] = 1.0

# Using a biased estimator
estimatorTypeV = ot.HSICVStat()
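
# A minimal sketch of how these pieces feed the HSIC estimator itself
# (assuming OpenTURNS >= 1.17; reusing Cov1 for every input marginal is
# an illustrative simplification):
covarianceList = [Cov1] * 3 + [Cov2]
hsicGlobal = ot.HSICEstimatorGlobalSensitivity(covarianceList, X, Y,
                                               estimatorTypeV)
print('R2-HSIC : ', hsicGlobal.getR2HSICIndices())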
Example #26
# Scale each direction
inputSample *= 10

model = ot.SymbolicFunction(['x', 'y'], ['cos(0.5*x) + sin(y)'])
outputSample = model(inputSample)

# Validation data
sampleSize = 10
inputValidSample = ot.ComposedDistribution(
    2 * [ot.Uniform(0, 10.0)]).getSample(sampleSize)
outputValidSample = model(inputValidSample)

# 2) Definition of exponential model
# The parameters have been calibrated using TNC optimization
# and AbsoluteExponential models
covarianceModel = ot.SquaredExponential([7.63, 2.11], [7.38])

# 3) Basis definition
basis = ot.ConstantBasisFactory(inputDimension).build()

# Kriging algorithm
algo = ot.KrigingAlgorithm(inputSample, outputSample, covarianceModel, basis)
algo.setOptimizeParameters(False)  # do not optimize hyper-parameters
algo.run()
result = algo.getResult()

vertices = [[1.0, 0.0], [2.0, 0.0], [2.0, 1.0], [1.0, 1.0], [1.5, 0.5]]
simplicies = [[0, 1, 4], [1, 2, 4], [2, 3, 4], [3, 0, 4]]

mesh2D = ot.Mesh(vertices, simplicies)
process = ot.ConditionedGaussianProcess(result, mesh2D)
def test_one_input_one_output():
    sampleSize = 6
    dimension = 1

    f = ot.SymbolicFunction(['x0'], ['x0 * sin(x0)'])

    X = ot.Sample(sampleSize, dimension)
    X2 = ot.Sample(sampleSize, dimension)
    for i in range(sampleSize):
        X[i, 0] = 3.0 + i
        X2[i, 0] = 2.5 + i
    X[0, 0] = 1.0
    X[1, 0] = 3.0
    X2[0, 0] = 2.0
    X2[1, 0] = 4.0
    Y = f(X)
    Y2 = f(X2)

    # create covariance model
    basis = ot.ConstantBasisFactory(dimension).build()
    covarianceModel = ot.SquaredExponential()

    # create algorithm
    algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis)

    # set sensible optimization bounds and estimate hyperparameters
    algo.setOptimizationBounds(ot.Interval(X.getMin(), X.getMax()))
    algo.run()

    # perform an evaluation
    result = algo.getResult()

    ott.assert_almost_equal(result.getMetaModel()(X), Y)
    ott.assert_almost_equal(result.getResiduals(), [1.32804e-07], 1e-3, 1e-3)
    ott.assert_almost_equal(result.getRelativeErrors(), [5.20873e-21])

    # Kriging variance is 0 on learning points
    covariance = result.getConditionalCovariance(X)
    nullMatrix = ot.Matrix(sampleSize, sampleSize)
    ott.assert_almost_equal(covariance, nullMatrix, 0.0, 1e-13)

    # Kriging variance is non-null on validation points
    validCovariance = result.getConditionalCovariance(X2)
    values = ot.Matrix([
        [0.81942182, -0.35599947, -0.17488593, 0.04622401, -0.03143555, 0.04054783],
        [-0.35599947, 0.20874735, 0.10943841, -0.03236419, 0.02397483, -0.03269184],
        [-0.17488593, 0.10943841, 0.05832917, -0.01779918, 0.01355719, -0.01891618],
        [0.04622401, -0.03236419, -0.01779918, 0.00578327, -0.00467674, 0.00688697],
        [-0.03143555, 0.02397483, 0.01355719, -0.00467674, 0.0040267, -0.00631173],
        [0.04054783, -0.03269184, -0.01891618, 0.00688697, -0.00631173, 0.01059488]])
    ott.assert_almost_equal(validCovariance - values, nullMatrix, 0.0, 1e-7)

    # Covariance per marginal & extract variance component
    coll = result.getConditionalMarginalCovariance(X)
    var = [mat[0, 0] for mat in coll]
    ott.assert_almost_equal(var, [0] * sampleSize, 1e-14, 1e-13)

    # Variance per marginal
    var = result.getConditionalMarginalVariance(X)
    ott.assert_almost_equal(var, ot.Sample(sampleSize, 1), 1e-14, 1e-13)

    # Prediction accuracy
    ott.assert_almost_equal(Y2, result.getMetaModel()(X2), 0.3, 0.0)
    XX_input = ot.Sample([[0.1, 0], [0.32, 0], [0.6, 0], [0.9, 0], [0.07, 1],
                          [0.1, 1], [0.4, 1], [0.5, 1], [0.85, 1]])
    y_output = ot.Sample(len(XX_input), 1)
    for i in range(len(XX_input)):
        y_output[i, 0] = fun_mixte(XX_input[i])

    def C(s, t):
        return m.exp(-4.0 * abs(s - t) / (1 + (s * s + t * t)))

    N = 32
    a = 4.0
    myMesh = ot.IntervalMesher([N]).build(ot.Interval(-a, a))

    myCovariance = ot.CovarianceMatrix(myMesh.getVerticesNumber())
    for k in range(myMesh.getVerticesNumber()):
        t = myMesh.getVertices()[k]
        for l in range(k + 1):
            s = myMesh.getVertices()[l]
            myCovariance[k, l] = C(s[0], t[0])

    covModel_discrete = ot.UserDefinedCovarianceModel(myMesh, myCovariance)
    f_ = ot.SymbolicFunction(
        ["tau", "theta", "sigma"],
        ["(tau!=0) * exp(-1/theta) * sigma * sigma + (tau==0) * exp(0) * sigma * sigma"])
    rho = ot.ParametricFunction(f_, [1, 2], [0.2, 0.3])
    covModel_discrete = ot.StationaryFunctionalCovarianceModel([1.0], [1.0], rho)
    covModel_continuous = ot.SquaredExponential([1.0], [1.0])
    covarianceModel = ot.ProductCovarianceModel(
        [covModel_continuous, covModel_discrete])
    covarianceModel.discretize(XX_input)
Example #29
# %%
# We rely on `H-Matrix` approximation for accelerating the evaluation.
# We change default parameters (compression, recompression) to higher values. The model is less accurate but very fast to build & evaluate.

# %%
ot.ResourceMap.SetAsString("KrigingAlgorithm-LinearAlgebra", "HMAT")
ot.ResourceMap.SetAsScalar("HMatrix-AssemblyEpsilon", 1e-5)
ot.ResourceMap.SetAsScalar("HMatrix-RecompressionEpsilon", 1e-4)

# %%
# In order to create the Kriging metamodel, we first select a constant trend with the `ConstantBasisFactory` class. Then we use a squared exponential covariance kernel.
# The `SquaredExponential` kernel has one amplitude coefficient and 4 scale coefficients. This is because this covariance kernel is anisotropic: each of the 4 input variables is associated with its own scale coefficient.

# %%
basis = ot.ConstantBasisFactory(dim).build()
covarianceModel = ot.SquaredExponential(dim)

# %%
# Typically, the optimization algorithm is quite good at setting sensible optimization bounds.
# In this case, however, the range of the input domain is extreme.

# %%
print("Lower and upper bounds of X_train:")
print(X_train.getMin(), X_train.getMax())

# %%
# We need to manually define sensible optimization bounds.
# Note that since the amplitude parameter is computed analytically (this is possible when the output dimension is 1), we only need to set bounds on the scale parameter.

# %%
scaleOptimizationBounds = ot.Interval([1.0, 1.0, 1.0, 1.0e-10],
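# %%
# The interval above is truncated in this listing. A hypothetical completion
# (the upper bounds are purely illustrative, and a matching `Y_train` output
# sample is assumed) would be passed to the algorithm before running it:
#
#     scaleOptimizationBounds = ot.Interval(
#         [1.0, 1.0, 1.0, 1.0e-10], [1.0e2, 1.0e2, 1.0e2, 1.0e-5])
#     algo = ot.KrigingAlgorithm(X_train, Y_train, covarianceModel, basis)
#     algo.setOptimizationBounds(scaleOptimizationBounds)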
# %%
sampleSize_train = 10
X_train = myDistribution.getSample(sampleSize_train)
Y_train = model(X_train)

# %%
# Create the metamodel
# --------------------

# %%
# In order to create the kriging metamodel, we first select a constant trend with the `ConstantBasisFactory` class. Then we use a squared exponential covariance model. Finally, we use the `KrigingAlgorithm` class to create the kriging metamodel, taking the training sample, the covariance model and the trend basis as input arguments. 

# %%
dimension = myDistribution.getDimension()
basis = ot.ConstantBasisFactory(dimension).build()
covarianceModel = ot.SquaredExponential([1.]*dimension, [1.0])
algo = ot.KrigingAlgorithm(X_train, Y_train, covarianceModel, basis)
algo.run()
result = algo.getResult()
krigingMetamodel = result.getMetaModel()

# %%
# The `run` method has optimized the hyperparameters of the metamodel. 
#
# We can then print the constant trend of the metamodel, which has been estimated using the least squares method.

# %%
result.getTrendCoefficients()

# %%
# We can also print the hyperparameters of the covariance model, which have been estimated by maximizing the likelihood.
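
# %%
# A minimal sketch, assuming the `result` object obtained above:
result.getCovarianceModel()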