def test_two_outputs():
    f = ot.SymbolicFunction(['x'], ['x * sin(x)', 'x * cos(x)'])
    sampleX = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]
    sampleY = f(sampleX)
    basis = ot.Basis([
        ot.SymbolicFunction(['x'], ['x']),
        ot.SymbolicFunction(['x'], ['x^2'])
    ])
    covarianceModel = ot.SquaredExponential([1.0])
    # freeze the hyperparameters: with no active parameter, no scale/amplitude
    # optimization is performed
    covarianceModel.setActiveParameter([])
    covarianceModel = ot.TensorizedCovarianceModel([covarianceModel] * 2)
    algo = ot.KrigingAlgorithm(sampleX, sampleY, covarianceModel, basis)
    algo.run()
    result = algo.getResult()
    mm = result.getMetaModel()
    assert mm.getOutputDimension() == 2, "wrong output dim"
    ott.assert_almost_equal(mm(sampleX), sampleY)
    # Check the conditional covariance
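    # (the zero off-diagonal blocks below reflect the TensorizedCovarianceModel:
    # the two outputs are modeled as independent)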
    reference_covariance = ot.Matrix([[4.4527, 0.0, 8.34404, 0.0],
                                      [0.0, 2.8883, 0.0, 5.41246],
                                      [8.34404, 0.0, 15.7824, 0.0],
                                      [0.0, 5.41246, 0.0, 10.2375]])
    ott.assert_almost_equal(
        result([[9.5], [10.0]]).getCovariance() - reference_covariance,
        ot.Matrix(4, 4), 0.0, 2e-2)
Example #2
    def _buildKrigingAlgo(self, inputSample, outputSample):
        """
        Build the kriging algorithm without running it.
        """
        if self._basis is None:
            # create linear basis only for the defect parameter (1st parameter),
            # constant otherwise
            input = ['x' + str(i) for i in range(self._dim)]
            functions = []
            # constant
            functions.append(ot.SymbolicFunction(input, ['1']))
            # linear for the first parameter only
            functions.append(ot.SymbolicFunction(input, [input[0]]))
            self._basis = ot.Basis(functions)

        if self._covarianceModel is None:
            # anisotropic squared exponential covariance model
            self._covarianceModel = ot.SquaredExponential([1] * self._dim)

        # normalization
        mean = inputSample.computeMean()
        try:
            stddev = inputSample.computeStandardDeviation()
        except AttributeError:
            stddev = inputSample.computeStandardDeviationPerComponent()
        linear = ot.SquareMatrix(self._dim)
        for j in range(self._dim):
            linear[j, j] = 1.0 / stddev[j] if abs(stddev[j]) > 1e-12 else 1.0
        zero = [0.0] * self._dim
        transformation = ot.LinearFunction(mean, zero, linear)

        algoKriging = ot.KrigingAlgorithm(transformation(inputSample),
                                          outputSample, self._covarianceModel,
                                          self._basis)
        return algoKriging, transformation
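
# For reference, ot.LinearFunction(center, constant, linear) applies the linear
# operator to (x - center) and adds the constant, so the transformation built
# above maps each component to (x_j - mean_j) / stddev_j. A minimal standalone
# check (sketch, values chosen for illustration):
import openturns as ot

center = ot.Point([2.0])
constant = ot.Point([0.0])
linear = ot.SquareMatrix([[0.5]])
f = ot.LinearFunction(center, constant, linear)
print(f([4.0]))  # [1] == 0.5 * (4.0 - 2.0)
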
def test_two_inputs_one_output():
    # Kriging use case
    inputDimension = 2

    # Learning data
    levels = [8, 5]
    box = ot.Box(levels)
    inputSample = box.generate()
    # Scale each direction
    inputSample *= 10.0

    model = ot.SymbolicFunction(['x', 'y'], ['cos(0.5*x) + sin(y)'])
    outputSample = model(inputSample)

    # Validation
    sampleSize = 10
    inputValidSample = ot.ComposedDistribution(
        2 * [ot.Uniform(0, 10.0)]).getSample(sampleSize)
    outputValidSample = model(inputValidSample)

    # 2) Definition of exponential model
    # The parameters have been calibrated using TNC optimization
    # and AbsoluteExponential models
    scales = [5.33532, 2.61534]
    amplitude = [1.61536]
    covarianceModel = ot.SquaredExponential(scales, amplitude)

    # 3) Basis definition
    basis = ot.ConstantBasisFactory(inputDimension).build()

    # 4) Kriging algorithm
    algo = ot.KrigingAlgorithm(inputSample, outputSample, covarianceModel,
                               basis)
    algo.run()

    result = algo.getResult()
    # Get meta model
    metaModel = result.getMetaModel()
    outData = metaModel(inputValidSample)

    # 5) Errors
    # Interpolation
    ott.assert_almost_equal(outputSample, metaModel(inputSample), 3.0e-5,
                            3.0e-5)

    # 6) Kriging variance is 0 on learning points
    covariance = result.getConditionalCovariance(inputSample)
    ott.assert_almost_equal(covariance, ot.SquareMatrix(len(inputSample)),
                            7e-7, 7e-7)

    # Covariance per marginal & extract variance component
    coll = result.getConditionalMarginalCovariance(inputSample)
    var = [mat[0, 0] for mat in coll]
    ott.assert_almost_equal(var, [0] * len(var), 0.0, 1e-13)

    # Variance per marginal
    var = result.getConditionalMarginalVariance(inputSample)
    ott.assert_almost_equal(var, ot.Point(len(inputSample)), 0.0, 1e-13)
    # Estimation
    ott.assert_almost_equal(outputValidSample, outData, 1.e-1, 1e-1)
Example #4
    def fit(self, X, y, **fit_params):
        input_dimension = X.shape[1]
        covarianceModel = eval('ot.' + self.kernel + '(' + str(input_dimension) + ')')
        basisCollection = eval('ot.' + self.basis + 'BasisFactory(' + str(input_dimension) + ').build()')
        algo = ot.KrigingAlgorithm(X, y.reshape(-1, 1), covarianceModel, basisCollection)
        algo.run()
        self._result = algo.getResult()
        return self
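
# The eval() calls above assume self.kernel and self.basis are trusted class-name
# strings such as 'SquaredExponential' and 'Constant'. A safer equivalent
# (sketch, same assumptions) avoids eval:
#   covarianceModel = getattr(ot, self.kernel)(input_dimension)
#   basisCollection = getattr(ot, self.basis + 'BasisFactory')(input_dimension).build()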
def createMyBasicKriging(X, Y):
    '''
    Create a kriging from a pair of X and Y samples.
    We use a 3/2 Matérn covariance model and a constant trend.
    '''
    basis = ot.ConstantBasisFactory(dimension).build()
    covarianceModel = ot.MaternModel([1.0], 1.5)
    algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis)
    algo.run()
    krigResult = algo.getResult()
    return krigResult
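
# Usage sketch for createMyBasicKriging (the function reads `dimension` from the
# enclosing scope, so it is defined here; the data values are illustrative):
import openturns as ot

dimension = 1
g = ot.SymbolicFunction(['x'], ['x * sin(x)'])
X = ot.Sample([[1.0], [3.0], [5.0], [7.0]])
Y = g(X)
krigResult = createMyBasicKriging(X, Y)
print(krigResult.getTrendCoefficients())
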
def test_stationary_fun():
    # fix https://github.com/openturns/openturns/issues/1861
    ot.RandomGenerator.SetSeed(0)
    rho = ot.SymbolicFunction("tau", "exp(-abs(tau))*cos(2*pi_*abs(tau))")
    model = ot.StationaryFunctionalCovarianceModel([1], [1], rho)
    x = ot.Normal().getSample(20)
    y = x + ot.Normal(0, 0.1).getSample(20)

    algo = ot.KrigingAlgorithm(x, y, model, ot.LinearBasisFactory().build())
    algo.run()
    result = algo.getResult()
    variance = result.getConditionalMarginalVariance(x)
    ott.assert_almost_equal(variance, ot.Sample(len(x), 1), 1e-16, 1e-16)
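    # The model encodes C(s, t) = amplitude^2 * rho((s - t) / scale); as a quick
    # sanity check (not in the original test), rho(0) = 1 so C(0, 0) = 1:
    print(model([0.0], [0.0]))  # expect a 1x1 matrix close to [[ 1 ]]
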
def test_two_outputs():
    f = ot.SymbolicFunction(['x'], ['x * sin(x)', 'x * cos(x)'])
    sampleX = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]
    sampleY = f(sampleX)
    basis = ot.Basis([ot.SymbolicFunction(['x'], ['x']),
                      ot.SymbolicFunction(['x'], ['x^2'])])
    covarianceModel = ot.SquaredExponential([1.0])
    covarianceModel.setActiveParameter([])
    algo = ot.KrigingAlgorithm(sampleX, sampleY, covarianceModel, basis)
    algo.run()
    result = algo.getResult()
    mm = result.getMetaModel()
    assert mm.getOutputDimension() == 2, "wrong output dim"
    ott.assert_almost_equal(mm(sampleX), sampleY)
Example #8
    def fit(self, X, y, **fit_params):
        """Fit Kriging regression model.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Training data.
        y : array-like, shape = (n_samples, [n_output_dims])
            Target values.

        Returns
        -------
        self : returns an instance of self.

        """
        if len(X) == 0:
            raise ValueError(
                "Can not perform a kriging algorithm with empty sample")
        # check data type is accurate
        if len(np.shape(X)) != 2:
            raise ValueError("X has incorrect shape.")
        input_dimension = np.shape(X)[1]
        if len(np.shape(
                y)) == 1:  # For sklearn.multioutput.MultiOutputRegressor
            y = np.expand_dims(y, axis=1)
        if len(np.shape(y)) != 2:
            raise ValueError("y has incorrect shape.")

        if type(self.kernel) is str:
            covarianceModel = eval('ot.' + self.kernel + "(" +
                                   str(input_dimension) + ")")
        else:
            covarianceModel = ot.CovarianceModel(self.kernel)
        if type(self.basis) is str:
            basisCollection = eval('ot.' + self.basis + "BasisFactory(" +
                                   str(input_dimension) + ").build()")
        else:
            basisCollection = ot.Basis(self.basis)
        ot.ResourceMap.SetAsString("KrigingAlgorithm-LinearAlgebra",
                                   str(self.linalg_meth).upper())
        algo = ot.KrigingAlgorithm(X, y, covarianceModel, basisCollection)
        if self.n_iter_opt:
            opt_algo = algo.getOptimizationAlgorithm()
            opt_algo.setMaximumIterationNumber(self.n_iter_opt)
            algo.setOptimizationAlgorithm(opt_algo)
        else:
            algo.setOptimizeParameters(False)
        algo.run()
        self.result_ = algo.getResult()
        return self
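
# Hypothetical usage of this estimator (the class name and constructor arguments
# are assumed from the fit() body above, not part of the original snippet):
#   est = OTKrigingRegressor(kernel='SquaredExponential', basis='Constant',
#                            linalg_meth='lapack', n_iter_opt=100)
#   est.fit(np.random.rand(20, 2), np.random.rand(20))
#   y_pred = est.result_.getMetaModel()(np.random.rand(5, 2))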
Example #9
    def _buildKrigingAlgo(self, inputSample, outputSample):
        """
        Build the kriging algorithm without running it.
        """
        if self._basis is None:
            # create linear basis only for the defect parameter (1st parameter),
            # constant otherwise
            input = ['x' + str(i) for i in range(self._dim)]
            functions = []
            # constant
            functions.append(ot.NumericalMathFunction(input, ['y'], ['1']))
            # linear for the first parameter only
            functions.append(ot.NumericalMathFunction(input, ['y'],
                                                      [input[0]]))
            self._basis = ot.Basis(functions)

        if self._covarianceModel is None:
            # anisotropic squared exponential covariance model
            covColl = ot.CovarianceModelCollection(self._dim)
            for i in range(self._dim):
                if LooseVersion(ot.__version__) == '1.6':
                    covColl[i] = ot.SquaredExponential(1, 1.)
                elif LooseVersion(ot.__version__) > '1.6':
                    covColl[i] = ot.SquaredExponential([1], [1.])
            self._covarianceModel = ot.ProductCovarianceModel(covColl)

        if LooseVersion(ot.__version__) == "1.9":
            algoKriging = ot.KrigingAlgorithm(inputSample, outputSample,
                                              self._covarianceModel,
                                              self._basis)
        else:
            algoKriging = ot.KrigingAlgorithm(inputSample, outputSample,
                                              self._basis,
                                              self._covarianceModel, True)
        algoKriging.run()
        return algoKriging
Example #10
def fitKriging(covarianceModel):
    '''
    Fit the parameters of a kriging metamodel.
    '''
    coordinates = ot.Sample([[1.0, 1.0], [5.0, 1.0], [9.0, 1.0],
                             [1.0, 3.5], [5.0, 3.5], [9.0, 3.5],
                             [1.0, 6.0], [5.0, 6.0], [9.0, 6.0]])
    observations = ot.Sample([[25.0], [25.0], [10.0], [20.0], [25.0], [20.0],
                              [15.0], [25.0], [25.0]])
    basis = ot.ConstantBasisFactory(2).build()
    algo = ot.KrigingAlgorithm(coordinates, observations, covarianceModel,
                               basis)
    algo.run()
    krigingResult = algo.getResult()
    return krigingResult
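
# Usage sketch: fit the same data with two different kernels and compare the
# calibrated models (the kernel choices are illustrative):
resultSE = fitKriging(ot.SquaredExponential([1.0] * 2, [1.0]))
resultMatern = fitKriging(ot.MaternModel([1.0] * 2, 2.5))
print(resultSE.getCovarianceModel())
print(resultMatern.getCovarianceModel())
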
def test_one_input_one_output():
    sampleSize = 6
    dimension = 1

    f = ot.SymbolicFunction(['x0'], ['x0 * sin(x0)'])

    X = ot.Sample(sampleSize, dimension)
    X2 = ot.Sample(sampleSize, dimension)
    for i in range(sampleSize):
        X[i, 0] = 3.0 + i
        X2[i, 0] = 2.5 + i
    X[0, 0] = 1.0
    X[1, 0] = 3.0
    X2[0, 0] = 2.0
    X2[1, 0] = 4.0
    Y = f(X)
    Y2 = f(X2)

    # create algorithm
    basis = ot.ConstantBasisFactory(dimension).build()
    covarianceModel = ot.SquaredExponential([1e-02], [4.50736])

    algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis)
    algo.run()

    # perform an evaluation
    result = algo.getResult()

    ott.assert_almost_equal(result.getMetaModel()(X), Y)
    ott.assert_almost_equal(result.getResiduals(), [1.32804e-07], 1e-3, 1e-3)
    ott.assert_almost_equal(result.getRelativeErrors(), [5.20873e-21])

    # Kriging variance is 0 on learning points
    covariance = result.getConditionalCovariance(X)
    covariancePoint = ot.Point(covariance.getImplementation())
    theoreticalVariance = ot.Point(sampleSize * sampleSize)
    ott.assert_almost_equal(covariance,
                            ot.Matrix(sampleSize, sampleSize),
                            8.95e-7, 8.95e-7)

    # Covariance per marginal & extract variance component
    coll = result.getConditionalMarginalCovariance(X)
    var = [mat[0, 0] for mat in coll]
    ott.assert_almost_equal(var, [0]*sampleSize, 1e-14, 1e-14)

    # Variance per marginal
    var = result.getConditionalMarginalVariance(X)
    ott.assert_almost_equal(var, ot.Point(sampleSize), 1e-14, 1e-14)
Example #12
    def init_model(self, noise):
        '''
        This function constructs the regression model
        '''

        if isinstance(self.kernel_function, str) or isinstance(
                self.mean_function, str):
            if isinstance(self.kernel_function, str):
                print(self.kernel_function)
            if isinstance(self.mean_function, str):
                print(self.mean_function)
            self.model = 'No model'
            return

        self.nugget = noise
        self.model = ot.KrigingAlgorithm(self.x_train, self.z_train,
                                         self.kernel_function,
                                         self.mean_function, False)
        self.model.setNoise([self.nugget] * len(self.x_train))
Example #13
def fitKriging(coordinates, observations, covarianceModel, basis):
    '''
    Fit the parameters of a Kriging metamodel.
    '''
    # Define the Kriging algorithm.
    algo = ot.KrigingAlgorithm(coordinates, observations, covarianceModel,
                               basis)

    # Set the optimization bounds for the scale parameter to sensible values
    # given the data set.
    scale_dimension = covarianceModel.getScale().getDimension()
    algo.setOptimizationBounds(
        ot.Interval([lower] * scale_dimension, [upper] * scale_dimension))

    # Run the Kriging algorithm and extract the fitted surrogate model.
    algo.run()
    krigingResult = algo.getResult()
    krigingMetamodel = krigingResult.getMetaModel()
    return krigingResult, krigingMetamodel
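
# `lower` and `upper` are defined in the surrounding example; a usage sketch
# with assumed values and data shaped like the earlier fitKriging snippet:
#   lower, upper = 0.1, 10.0
#   krigingResult, krigingMetamodel = fitKriging(
#       coordinates, observations,
#       ot.SquaredExponential([1.0] * 2, [1.0]),
#       ot.ConstantBasisFactory(2).build())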
Example #14
# %%
# We generate a simple Monte-Carlo input sample and evaluate the corresponding output sample.

# %%
distribution = ot.Normal(dimension)
samplesize = 15
x = distribution.getSample(samplesize)
y = model(x)

# %%
# Then we create a kriging metamodel, using a constant trend and a squared exponential covariance model. 

# %%
basis = ot.ConstantBasisFactory(dimension).build()
covarianceModel = ot.SquaredExponential([0.1]*dimension, [1.0])
algo = ot.KrigingAlgorithm(x, y, covarianceModel, basis)
algo.run()
result = algo.getResult()
metamodel = result.getMetaModel()

# %%
# It is not so easy to visualize a bidimensional function. In order to simplify the graphics, we consider the value of the function at the input :math:`x_{1,ref}=0.5`. This amounts to creating a `ParametricFunction` where the first variable :math:`x_1` (at input index 0) is set to :math:`0.5`.

# %%
x1ref = 0.5
metamodelAtXref = ot.ParametricFunction(metamodel, [0], [x1ref])
modelAtXref = ot.ParametricFunction(model, [0], [x1ref])

# %%
# For this given value of :math:`x_1`, we plot the model and the metamodel with :math:`x_2` ranging from its 1% up to its 99% quantile. We configure the X title to "X2" because the default setting would label this axis as the first input of the parametric function, whose default name is "X0".
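
# %%
# The plotting code for this step is not included in the excerpt; a minimal
# sketch, assuming the standard normal input defined above (the 1% and 99%
# quantiles of N(0, 1) are roughly -2.33 and +2.33):
x2min = ot.Normal().computeQuantile(0.01)[0]
x2max = ot.Normal().computeQuantile(0.99)[0]
graph = modelAtXref.draw(x2min, x2max)
graph.add(metamodelAtXref.draw(x2min, x2max))
graph.setXTitle('X2')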
    # MetaModelValidation - sparse polynomial chaos (the opening of this call is
    # truncated in the excerpt; reconstructed from the variable name used below)
    metaModelValidationSPC = ot.MetaModelValidation(inputValidation,
                                                    outputValidation,
                                                    result.getMetaModel())
    print("")
    print("Sparse chaos scoring")
    print("Q2 = ", round(metaModelValidationSPC.computePredictivityFactor(),
                         5))
    print("Residual sample = ",
          repr(metaModelValidationSPC.getResidualSample()))

    # 2) Kriging algorithm
    # KrigingAlgorithm
    basis = ot.QuadraticBasisFactory(dimension).build()
    # model already computed, separately
    covarianceModel = ot.GeneralizedExponential([1.933, 1.18, 1.644], [10.85],
                                                2.0)
    algo2 = ot.KrigingAlgorithm(inputSample, outputSample, basis,
                                covarianceModel, True, True)
    algo2.run()
    result2 = algo2.getResult()

    # MetaModelValidation - KG
    metaModelValidationKG = ot.MetaModelValidation(inputValidation,
                                                   outputValidation,
                                                   result2.getMetaModel())
    print("")
    print("Kriging scoring")
    print("Q2 = ", round(metaModelValidationKG.computePredictivityFactor(), 3))
    ot.PlatformInfo.SetNumericalPrecision(2)
    print("Residual sample = ",
          repr(metaModelValidationKG.getResidualSample()))

def test_one_input_one_output():
    sampleSize = 6
    dimension = 1

    f = ot.SymbolicFunction(['x0'], ['x0 * sin(x0)'])

    X = ot.Sample(sampleSize, dimension)
    X2 = ot.Sample(sampleSize, dimension)
    for i in range(sampleSize):
        X[i, 0] = 3.0 + i
        X2[i, 0] = 2.5 + i
    X[0, 0] = 1.0
    X[1, 0] = 3.0
    X2[0, 0] = 2.0
    X2[1, 0] = 4.0
    Y = f(X)
    Y2 = f(X2)

    # create covariance model
    basis = ot.ConstantBasisFactory(dimension).build()
    covarianceModel = ot.SquaredExponential()

    # create algorithm
    algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis)

    # set sensible optimization bounds and estimate hyperparameters
    algo.setOptimizationBounds(ot.Interval(X.getMin(), X.getMax()))
    algo.run()

    # perform an evaluation
    result = algo.getResult()

    ott.assert_almost_equal(result.getMetaModel()(X), Y)
    ott.assert_almost_equal(result.getResiduals(), [1.32804e-07], 1e-3, 1e-3)
    ott.assert_almost_equal(result.getRelativeErrors(), [5.20873e-21])

    # Kriging variance is 0 on learning points
    covariance = result.getConditionalCovariance(X)
    nullMatrix = ot.Matrix(sampleSize, sampleSize)
    ott.assert_almost_equal(covariance, nullMatrix, 0.0, 1e-13)

    # Kriging variance is non-null on validation points
    validCovariance = result.getConditionalCovariance(X2)
    values = ot.Matrix([[
        0.81942182, -0.35599947, -0.17488593, 0.04622401, -0.03143555,
        0.04054783
    ],
                        [
                            -0.35599947, 0.20874735, 0.10943841, -0.03236419,
                            0.02397483, -0.03269184
                        ],
                        [
                            -0.17488593, 0.10943841, 0.05832917, -0.01779918,
                            0.01355719, -0.01891618
                        ],
                        [
                            0.04622401, -0.03236419, -0.01779918, 0.00578327,
                            -0.00467674, 0.00688697
                        ],
                        [
                            -0.03143555, 0.02397483, 0.01355719, -0.00467674,
                            0.0040267, -0.00631173
                        ],
                        [
                            0.04054783, -0.03269184, -0.01891618, 0.00688697,
                            -0.00631173, 0.01059488
                        ]])
    ott.assert_almost_equal(validCovariance - values, nullMatrix, 0.0, 1e-7)

    # Covariance per marginal & extract variance component
    coll = result.getConditionalMarginalCovariance(X)
    var = [mat[0, 0] for mat in coll]
    ott.assert_almost_equal(var, [0] * sampleSize, 1e-14, 1e-13)

    # Variance per marginal
    var = result.getConditionalMarginalVariance(X)
    ott.assert_almost_equal(var, ot.Sample(sampleSize, 1), 1e-14, 1e-13)

    # Prediction accuracy
    ott.assert_almost_equal(Y2, result.getMetaModel()(X2), 0.3, 0.0)
# %%
# We also get the output training values:
ydata = bm.objectiveFunction(xdata)

# %%
# This use case is defined in dimension 2 and we use a constant basis for the trend estimation:
dimension = bm.dim
basis = ot.ConstantBasisFactory(dimension).build()

# %%
# We choose a squared exponential covariance model in dimension 2:
covarianceModel = ot.SquaredExponential([0.1] * dimension, [1.0])

# %%
# We have all the components to build a kriging algorithm and run it:
algo = ot.KrigingAlgorithm(xdata, ydata, covarianceModel, basis)
algo.run()

# %%
# We get the result of the kriging analysis with:
result = algo.getResult()

# %%
# Metamodel visualization
# -----------------------
#
# We draw the kriging metamodel of the Branin function. It is the mean of the random process.
metamodel = result.getMetaModel()

graphBasic = metamodel.draw([0.0, 0.0], [1.0, 1.0], [100] * 2)
drawables = graphBasic.getDrawables()
itf = ot.SymbolicFunction(['mu', 'sigma', 'x'], ['sigma*x+mu'])
# the direct transform is missing from the excerpt; it is reconstructed here as
# the standardization (x - mu) / sigma, i.e. the inverse of itf
tf = ot.SymbolicFunction(['mu', 'sigma', 'x'], ['(x-mu)/sigma'])
myInverseTransform = ot.ParametricFunction(itf, [0, 1], [mean, stdDev])
myTransform = ot.ParametricFunction(tf, [0, 1], [mean, stdDev])

# %%
# A constant basis
# ----------------
#
# In this paragraph we choose a constant basis for the kriging. There is only one unknown, which is the
# value of the constant. The basis is built with the :class:`~openturns.ConstantBasisFactory` class.
basis = ot.ConstantBasisFactory(dimension).build()

# %%
# We build the kriging algorithm by giving it the transformed data, the output data, the covariance
# model and the basis.
algo = ot.KrigingAlgorithm(myTransform(Xtrain), Ytrain, covarianceModel, basis)

# %%
# We can run the algorithm and store the result:
algo.run()
result = algo.getResult()

# %%
# The metamodel is the following :class:`~openturns.ComposedFunction`:
metamodel = ot.ComposedFunction(result.getMetaModel(), myTransform)

# %%
# We can draw the metamodel and the exact model on the same graph.
graph = plot_exact_model()
y_test = metamodel(x_test)
curve = ot.Curve(x_test, y_test)
# %%
# Create the kriging algorithm
# ----------------------------

# 1. basis
ot.ResourceMap.SetAsBool(
    'GeneralLinearModelAlgorithm-UseAnalyticalAmplitudeEstimate', True)
basis = ot.ConstantBasisFactory(dim).build()
print(basis)

# 2. covariance model
cov = ot.MaternModel([1.], [2.5], 1.5)
print(cov)

# 3. kriging algorithm
algokriging = ot.KrigingAlgorithm(x, y, cov, basis)

## error measure
#algokriging.setNoise([5*1e-1]*n_pt)

# 4. Optimization
# algokriging.setOptimizationAlgorithm(ot.NLopt('GN_DIRECT'))
lhsExperiment = ot.LHSExperiment(ot.Uniform(1e-1, 1e2), 50)
algokriging.setOptimizationAlgorithm(
    ot.MultiStart(ot.TNC(), lhsExperiment.generate()))
algokriging.setOptimizationBounds(ot.Interval([0.1], [1e2]))

# if we choose not to optimize parameters
#algokriging.setOptimizeParameters(False)

# 5. run the algorithm
algokriging.run()
    inputSample *= 10

    # Define model
    model = ot.Function(['x', 'y'], ['z'], ['cos(0.5*x) + sin(y)'])
    outputSample = model(inputSample)

    # 2) Definition of exponential model
    covarianceModel = ot.SquaredExponential([1.988, 0.924], [3.153])

    # 3) Basis definition
    basisCollection = ot.BasisCollection(
        1,
        ot.ConstantBasisFactory(spatialDimension).build())

    # Kriging algorithm
    algo = ot.KrigingAlgorithm(inputSample, outputSample, covarianceModel,
                               basisCollection)
    algo.run()
    result = algo.getResult()

    vertices = [[1.0, 0.0], [2.0, 0.0], [2.0, 1.0], [1.0, 1.0], [1.5, 0.5]]
    simplices = [[0, 1, 4], [1, 2, 4], [2, 3, 4], [3, 0, 4]]

    mesh2D = ot.Mesh(vertices, simplices)
    process = ot.ConditionedGaussianProcess(result, mesh2D)

    # Get a realization of the process
    realization = process.getRealization()
    print("realization = ", repr(realization))

    # Get a sample & compare it to expectation
    sample = process.getSample(5000)
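
    # The comparison code is not included in this excerpt; a minimal sketch:
    # the empirical mean of the realizations should approach the kriging mean
    # prediction at the mesh vertices.
    empiricalMean = sample.computeMean()
    krigingMean = result.getMetaModel()(mesh2D.getVertices())
    print(empiricalMean)
    print(krigingMean)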
# problem
problem = ot.OptimizationProblem()
problem.setObjective(model)
bounds = ot.Interval([0.0] * dim, [1.0] * dim)
problem.setBounds(bounds)

# design
experiment = ot.Box([1, 1])
inputSample = experiment.generate()
modelEval = model(inputSample)
outputSample = modelEval.getMarginal(0)

# first kriging model
covarianceModel = ot.SquaredExponential([0.3007, 0.2483], [0.981959])
basis = ot.ConstantBasisFactory(dim).build()
kriging = ot.KrigingAlgorithm(inputSample, outputSample, covarianceModel,
                              basis)
noise = list(map(lambda x: x[1], modelEval))
kriging.setNoise(noise)
kriging.run()

# algo
algo = ot.EfficientGlobalOptimization(problem, kriging.getResult())
algo.setNoiseModel(ot.SymbolicFunction(['x1', 'x2'],
                                       ['0.96']))  # assume constant noise var
algo.setMaximumIterationNumber(20)
algo.setImprovementFactor(
    0.05)  # stop when improvement is < 5% of the current optimum
algo.setAEITradeoff(0.66744898)
algo.run()
result = algo.getResult()
# print('1st pass result=', result)
Example #22
import openturns as ot
from openturns.viewer import View

f = ot.SymbolicFunction(['x'],  ['x * sin(x)'])
sampleX = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]
sampleY = f(sampleX)
basis = ot.Basis([ot.SymbolicFunction(['x'], ['x']),
                  ot.SymbolicFunction(['x'], ['x^2'])])
covarianceModel = ot.SquaredExponential([1.0])
covarianceModel.setActiveParameter([])
algo = ot.KrigingAlgorithm(sampleX, sampleY, covarianceModel, basis)
algo.run()
result = algo.getResult()

graph = f.draw(0.0, 10.0)
graph.add(result.getMetaModel().draw(0.0, 10.0))
graph.add(ot.Cloud(sampleX, sampleY))
graph.setColors(['blue', 'red', 'black'])
graph.setLegends(['model', 'meta model', 'sample'])
graph.setLegendPosition('topleft')
graph.setTitle('y(x)=x * sin(x)')
View(graph, figure_kwargs={'figsize': (8, 4)})
Example #23
    def _estimKrigingTheta(self, algoKriging, lowerBound, upperBound, size):
        """
        Estimate the kriging theta values, starting with an initial random
        search over a Sobol' sequence of `size` points.
        """
        # get input parameters of the kriging algorithm
        X = algoKriging.getInputSample()
        Y = algoKriging.getOutputSample()

        algoKriging.run()
        krigingResult = algoKriging.getResult()
        covarianceModel = krigingResult.getCovarianceModel()
        basis = krigingResult.getBasisCollection()
        if LooseVersion(ot.__version__) == '1.9':
            llf = algoKriging.getReducedLogLikelihoodFunction()
        else:
            llf = algoKriging.getLogLikelihoodFunction()

        # create uniform distribution of the parameters bounds
        dim = len(lowerBound)
        distBoundCol = []
        for i in range(dim):
            distBoundCol += [ot.Uniform(lowerBound[i], upperBound[i])]
        distBound = ot.ComposedDistribution(distBoundCol)

        if size > 0:
            # Generate starting points with a low discrepancy sequence
            thetaStart = ot.LowDiscrepancyExperiment(ot.SobolSequence(),
                                                     distBound,
                                                     size).generate()
            # Get the best theta from the maximum llf value
            llfValue = llf(thetaStart)
            indexMax = int(np.argmax(llfValue))
            bestTheta = thetaStart[indexMax]

            # update theta after random search
            if LooseVersion(ot.__version__) == '1.6':
                covarianceModel.setScale(bestTheta)
            elif LooseVersion(ot.__version__) > '1.6':
                # optimize theta and sigma in ot 1.8
                covarianceModel.setScale(bestTheta[:-1])
                covarianceModel.setAmplitude([bestTheta[-1]])

        # Now the KrigingAlgorithm is used to optimize the likelihood using a
        # good starting point
        if LooseVersion(ot.__version__) == "1.9":
            algoKriging = ot.KrigingAlgorithm(X, Y, covarianceModel, basis)
        else:
            algoKriging = ot.KrigingAlgorithm(X, Y, basis, covarianceModel,
                                              True)

        # set TNC optim
        searchInterval = ot.Interval(lowerBound, upperBound)
        if LooseVersion(ot.__version__) == '1.6':
            optimizer = ot.TNC()
            optimizer.setBoundConstraints(searchInterval)
            algoKriging.setOptimizer(optimizer)
        elif LooseVersion(ot.__version__) in ['1.7', '1.8']:
            optimizer = algoKriging.getOptimizationSolver()
            problem = optimizer.getProblem()
            problem.setBounds(searchInterval)
            optimizer.setProblem(problem)
            algoKriging.setOptimizationSolver(optimizer)
        elif LooseVersion(ot.__version__) == '1.9':
            algoKriging.setOptimizationBounds(searchInterval)

        return algoKriging
Example #24
                     [39.339349], [40.384559], [38.718623], [46.189709],
                     [36.155737], [31.768369], [35.384313], [47.914584],
                     [46.758537], [46.564428], [39.698493], [45.636588],
                     [40.643948]])

# detection threshold
detection = 38

# Select point as initial DOE
inputDOE = inputSample[:]
outputDOE = signals[:]

# simulate the true physical model
basis = ot.ConstantBasisFactory(4).build()
covarianceModel = ot.SquaredExponential([5.03148, 13.9442, 20, 20], [15.1697])
krigingModel = ot.KrigingAlgorithm(inputSample, signals, covarianceModel,
                                   basis)

ot.RandomGenerator.SetSeed(0)
np.random.seed(0)
krigingModel.run()
physicalModel = krigingModel.getResult().getMetaModel()

####### Test on the POD models ###################
# Test hitmiss without Box Cox with rf classifier
np.random.seed(0)
ot.RandomGenerator.SetSeed(0)
ot.RandomGenerator.SetState(ot.RandomGeneratorState(ot.Indices([0] * 768), 0))
POD1 = otpod.AdaptiveHitMissPOD(inputDOE, outputDOE, physicalModel, 20,
                                detection)
POD1.run()
detectionSize1 = POD1.computeDetectionSize(0.9, 0.95)
    def run(self):
        """
        Launch the algorithm and build the POD models.

        Notes
        -----
        This method launches the iterative algorithm. First the censored data
        are filtered if needed. The Box Cox transformation is performed if it is
        enabled. Then the enrichment of the design of experiments is performed.
        Once the algorithm stops, it builds the POD models: conditional samples are
        simulated for each defect size, then the distributions of the probability
        estimator (for MC simulation) are built. Eventually, a sample of this
        distribution is used to compute the mean POD and the POD at the confidence
        level.
        """

        # Create an initial uniform distribution if not given
        if self._distribution is None:
            inputMin = self._input.getMin()
            inputMin[0] = np.min(self._defectSizes)
            inputMax = self._input.getMax()
            inputMax[0] = np.max(self._defectSizes)
            marginals = [ot.Uniform(inputMin[i], inputMax[i]) for i in range(self._dim)]
            self._distribution = ot.ComposedDistribution(marginals)

        # Create the design of experiments of the candidate points where the
        # criterion is computed
        if self._distribution.hasIndependentCopula():
            # without copula use low discrepancy experiment as first doe
            doeCandidate = ot.LowDiscrepancyExperiment(ot.SobolSequence(), 
                            self._distribution, self._candidateSize).generate()
        else:
            # else simple Monte Carlo distribution
            doeCandidate = self._distribution.getSample(self._candidateSize)

        # build initial kriging model
        # build the kriging model without optimization
        algoKriging = self._buildKrigingAlgo(self._input, self._signals)
        if self._verbose:
            print('Building the kriging model')
            print('Optimization of the covariance model parameters...')

        if LooseVersion(ot.__version__) == '1.9':
            llDim = algoKriging.getReducedLogLikelihoodFunction().getInputDimension()
        else:
            llDim = algoKriging.getLogLikelihoodFunction().getInputDimension()
        lowerBound = [0.001] * llDim
        upperBound = [50] * llDim               
        algoKriging = self._estimKrigingTheta(algoKriging,
                                              lowerBound, upperBound,
                                              self._initialStartSize)
        algoKriging.run()

        # Get kriging results
        self._krigingResult = algoKriging.getResult()
        self._covarianceModel = self._krigingResult.getCovarianceModel()
        self._basis = self._krigingResult.getBasisCollection()
        metamodel = self._krigingResult.getMetaModel()

        self._Q2 = self._computeQ2(self._input, self._signals, self._krigingResult)
        if self._verbose:
            print('Kriging validation Q2 (>0.9): {:0.4f}\n'.format(self._Q2))

        plt.ion()
        # Start the improvement loop
        iteration = 0
        while iteration < self._nIteration:
            iteration += 1
            if self._verbose:
                print('Iteration : {}/{}'.format(iteration, self._nIteration))

            # compute POD (ptrue = pn-1) for bias reducing in the criterion
            # Monte Carlo for all defect sizes in a vectorized way.
            # get Sample for all parameters except the defect size
            samplePred = self._distribution.getSample(self._samplingSize)[:,1:]
            fullSamplePred = ot.NumericalSample(self._samplingSize * self._defectNumber,
                                                self._dim)
            # Add the defect sizes as the first component
            for i, defect in enumerate(self._defectSizes):
                fullSamplePred[self._samplingSize*i:self._samplingSize*(i+1), :] = \
                                        self._mergeDefectInX(defect, samplePred)
            meanPredictionSample = metamodel(fullSamplePred)
            meanPredictionSample = np.reshape(meanPredictionSample, (self._samplingSize,
                                                    self._defectNumber), 'F')
            # compute the POD for all defect sizes
            currentPOD = np.mean(meanPredictionSample > self._detectionBoxCox, axis=0)

            # Compute criterion for all candidate in the candidate doe
            criterion = 1000000000
            for icand, candidate in enumerate(doeCandidate):

                # add the current candidate to the kriging doe
                inputAugmented = self._input[:]
                inputAugmented.add(candidate)
                signalsAugmented = self._signals[:]
                # predict the signal value of the candidate using the current
                # kriging model
                signalsAugmented.add(metamodel(candidate))
                # create a temporary kriging model with the new doe and without
                # updating the covariance model parameters
                if LooseVersion(ot.__version__) == '1.9':
                    algoKrigingTemp = ot.KrigingAlgorithm(inputAugmented, signalsAugmented,
                                                          self._covarianceModel,
                                                          self._basis,
                                                          True)
                else:
                    algoKrigingTemp = ot.KrigingAlgorithm(inputAugmented, signalsAugmented,
                                                          self._basis,
                                                          self._covarianceModel,
                                                          True)
                if LooseVersion(ot.__version__) > '1.6':
                    optimizer = algoKrigingTemp.getOptimizationSolver()
                    optimizer.setMaximumIterationNumber(0)
                    algoKrigingTemp.setOptimizationSolver(optimizer)

                algoKrigingTemp.run()
                krigingResultTemp = algoKrigingTemp.getResult()

                # compute the criterion for all defect size
                crit = []
                # save results, used to compute the PODModel and PODCLModel
                PODPerDefect = ot.NumericalSample(self._simulationSize *
                                         self._samplingSize, self._defectNumber)
                for idef, defect in enumerate(self._defectSizes):
                    podSample = self._computePODSamplePerDefect(defect,
                        self._detectionBoxCox, krigingResultTemp,
                        self._distribution, self._simulationSize, self._samplingSize)
                    PODPerDefect[:, idef] = podSample

                    meanPOD = podSample.computeMean()[0]
                    varPOD = podSample.computeVariance()[0]
                    crit.append(varPOD + (meanPOD - currentPOD[idef])**2)
                # compute the criterion aggregated for all defect sizes
                newCriterion = np.sqrt(np.mean(crit))

                # check if the result is better or not
                if newCriterion < criterion:
                    self._PODPerDefect = PODPerDefect
                    criterion = newCriterion
                    indexOpt = icand
                
                if self._verbose:
                    updateProgress(icand, int(doeCandidate.getSize()), 'Computing criterion')

            # get the best candidate
            candidateOpt = doeCandidate[indexOpt]
            # add new point to DOE
            self._input.add(candidateOpt)
            # add the signal computed by the physical model
            if self._boxCox:
                self._signals.add(self._boxCoxTransform(self._physicalModel(candidateOpt)))
            else:
                self._signals.add(self._physicalModel(candidateOpt))
            # remove added candidate from the doeCandidate
            doeCandidate.erase(indexOpt)
            if self._verbose:
                print('Criterion value : {:0.4f}'.format(criterion))
                print('Added point : {}'.format(candidateOpt))
                print('Update the kriging model')

            # update the kriging model without optimization
            algoKriging = self._buildKrigingAlgo(self._input, self._signals)
            if LooseVersion(ot.__version__) == '1.7':
                optimizer = algoKriging.getOptimizationSolver()
                optimizer.setMaximumIterationNumber(0)
                algoKriging.setOptimizationSolver(optimizer)
            elif LooseVersion(ot.__version__) == '1.8':
                algoKriging.setOptimizeParameters(False)

            algoKriging.run()

            self._Q2 = self._computeQ2(self._input, self._signals, algoKriging.getResult())

            # Check the quality of the kriging model if it needs optimization
            if self._Q2 < 0.95:
                if self._verbose:
                    print('Optimization of the covariance model parameters...')

                if LooseVersion(ot.__version__) == '1.9':
                    llDim = algoKriging.getReducedLogLikelihoodFunction().getInputDimension()
                else:
                    llDim = algoKriging.getLogLikelihoodFunction().getInputDimension()
                lowerBound = [0.001] * llDim
                upperBound = [50] * llDim               
                algoKriging = self._estimKrigingTheta(algoKriging,
                                                      lowerBound, upperBound,
                                                      self._initialStartSize)
                algoKriging.run()

            # Get kriging results
            self._krigingResult = algoKriging.getResult()
            self._covarianceModel = self._krigingResult.getCovarianceModel()
            self._basis = self._krigingResult.getBasisCollection()
            metamodel = self._krigingResult.getMetaModel()

            self._Q2 = self._computeQ2(self._input, self._signals, self._krigingResult)
            if self._verbose:
                print('Kriging validation Q2 (>0.9): {:0.4f}'.format(self._Q2))

            if self._graph:
                # create the interpolate function of the POD model
                meanPOD = self._PODPerDefect.computeMean()
                interpModel = interp1d(self._defectSizes, np.array(meanPOD), kind='linear')
                self._PODmodel = ot.PythonFunction(1, 1, interpModel)
                # The POD at confidence level is built in getPODCLModel() directly
                fig, ax = self.drawPOD(self._probabilityLevel, self._confidenceLevel)
                plt.draw()
                plt.pause(0.001)
                plt.show()
                if self._graphDirectory is not None:
                    fig.savefig(os.path.join(self._graphDirectory, 'AdaptiveSignalPOD_')+str(iteration),
                                bbox_inches='tight', transparent=True)

        # Compute the final POD with the last updated kriging model
        if self._verbose:
                print('\nStart computing the POD with the last updated kriging model')
        # compute the sample containing the POD values for all defect 
        self._PODPerDefect = ot.NumericalSample(self._simulationSize *
                                         self._samplingSize, self._defectNumber)
        for i, defect in enumerate(self._defectSizes):
            self._PODPerDefect[:, i] = self._computePODSamplePerDefect(defect,
                self._detectionBoxCox, self._krigingResult, self._distribution,
                self._simulationSize, self._samplingSize)
            if self._verbose:
                updateProgress(i, self._defectNumber, 'Computing POD per defect')

        # compute the mean POD 
        meanPOD = self._PODPerDefect.computeMean()
        # create the interpolate function of the POD model
        interpModel = interp1d(self._defectSizes, np.array(meanPOD), kind='linear')
        self._PODmodel = ot.PythonFunction(1, 1, interpModel)

        # The POD at confidence level is built in getPODCLModel() directly

        # remove the interactive plotting
        plt.ioff()
Example #26
sampleSize = 10
inputValidSample = ot.ComposedDistribution(
    2 * [ot.Uniform(1.0, 9.0)]).getSample(sampleSize)
outputValidSample = model(inputValidSample)

# Reimplement the squared exponential covariance model
rho = ot.SymbolicFunction(['x', 'y'], ['exp(-0.5 * (x * x + y * y))'])
covarianceModel = ot.StationaryFunctionalCovarianceModel([6.0, 2.0], [1.5], rho)

# Basis definition
basis = ot.LinearBasisFactory(inputDimension).build()

# Kriging algorithm
algo = ot.KrigingAlgorithm(inputSample, outputSample, covarianceModel, basis)
start = [50.0] * inputDimension
loglikelihood = algo.getReducedLogLikelihoodFunction()(start)
algo.setOptimizeParameters(False)
algo.run()
result = algo.getResult()
metaModel = result.getMetaModel()
variance = result.getConditionalMarginalVariance(inputSample)
ott.assert_almost_equal(variance, ot.Sample(
    inputSample.getSize(), 1), 1e-14, 1e-14)


# Consistency check: does the reimplementation fit the SquaredExponential class?
squaredExponential = ot.SquaredExponential(inputDimension)
squaredExponential.setParameter([6.0, 2.0, 1.5])
algoSE = ot.KrigingAlgorithm(
Example #27
# We need to manually define sensible optimization bounds.
# Note that since the amplitude parameter is computed analytically (this is possible when the output dimension is 1), we only need to set bounds on the scale parameter.

# %%
scaleOptimizationBounds = ot.Interval([1.0, 1.0, 1.0, 1.0e-10],
                                      [1.0e11, 1.0e3, 1.0e1, 1.0e-5])

# %%
# Finally, we use the `KrigingAlgorithm` class to create the Kriging metamodel.
# It requires a training sample, a covariance kernel and a trend basis as input arguments.
# We need to set the initial scale parameter for the optimization. The upper bound of the input domain is a sensible choice here.
# We must not forget to actually set the optimization bounds defined above.

# %%
covarianceModel.setScale(X_train.getMax())
algo = ot.KrigingAlgorithm(X_train, Y_train, covarianceModel, basis)
algo.setOptimizationBounds(scaleOptimizationBounds)

# %%
# Running the algorithm (below) optimizes the hyperparameters of the metamodel.
#
# We can then print the constant trend of the metamodel, which has been estimated using the least squares method.

# %%
algo.run()
result = algo.getResult()
krigingMetamodel = result.getMetaModel()

# %%
# The `getTrendCoefficients` method returns the coefficients of the trend.
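
# %%
# The corresponding call (sketch):
print(result.getTrendCoefficients())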
Example #28
# Now that the Karhunen-Loeve algorithm is trained, we can project the
# output trajectories onto the lower-dimensional space:
projectionSample = resultKL.project(outputFMUSample)
n_mode = projectionSample.getDimension()
print("Karhunen-Loeve projection is dimension {}".format(n_mode))

# %%
# We keep following our road map by metamodeling the projection of the
# curves onto the lower-dimensional space.
# We metamodel the Karhunen-Loeve coefficients using ordinary kriging.

dim = inputSample.getDimension()  # only 1 input dimension
basis = ot.ConstantBasisFactory(dim).build()
covarianceModel = ot.SquaredExponential(dim)

algo = ot.KrigingAlgorithm(inputSample, projectionSample, covarianceModel,
                           basis)
algo.run()
result = algo.getResult()
metamodel = result.getMetaModel()

# %%
# We have created all the pieces for a "PointToField" metamodel. Let us put
# these pieces together:


def globalMetamodel(sample):
    emulatedCoefficients = metamodel(sample)
    restoreFunction = ot.KarhunenLoeveLifting(resultKL)
    emulatedProcessSample = restoreFunction(emulatedCoefficients)
    return emulatedProcessSample
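

# Usage sketch for the global metamodel (hypothetical new inputs; the input
# sample above is one-dimensional):
newInputs = ot.Sample([[0.5], [1.0], [1.5]])
emulatedTrajectories = globalMetamodel(newInputs)
print(emulatedTrajectories.getSize())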