Example #1
    def _estimKrigingTheta(self, algoKriging, lowerBound, upperBound, size):
        """
        Estimate the kriging theta values with an initial random search using
        a Sobol sequence of size samples.
        """

        if size > 0:
            # create uniform distribution of the parameters bounds
            dim = len(lowerBound)
            distBoundCol = []
            for i in range(dim):
                distBoundCol += [ot.Uniform(lowerBound[i], upperBound[i])]
            distBound = ot.ComposedDistribution(distBoundCol)

            # set the bounds
            searchInterval = ot.Interval(lowerBound, upperBound)
            algoKriging.setOptimizationBounds(searchInterval)
            # Generate starting points with a low discrepancy sequence
            startingPoint = ot.LowDiscrepancyExperiment(
                ot.SobolSequence(), distBound, size).generate()

            algoKriging.setOptimizationAlgorithm(
                ot.MultiStart(ot.TNC(), startingPoint))
        else:
            algoKriging.setOptimizeParameters(False)

        return algoKriging
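A minimal usage sketch (hypothetical data; assumes OpenTURNS >= 1.9 naming and that `helper` is an instance of the unshown class defining `_estimKrigingTheta`):

import openturns as ot

X = ot.Sample([[0.0], [0.5], [1.0], [1.5], [2.0]])
Y = ot.Sample([[0.0], [0.25], [1.0], [2.25], [4.0]])  # y = x^2, for illustration
basis = ot.ConstantBasisFactory(1).build()
cov = ot.SquaredExponential([1.0])
algo = ot.KrigingAlgorithm(X, Y, cov, basis)
# multi-start TNC search for the scale parameter theta in [0.05, 10]
algo = helper._estimKrigingTheta(algo, [0.05], [10.0], size=16)
algo.run()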
Example #2
    def build(self, dataX, dataY):
        logLikelihood = ot.NumericalMathFunction(ReducedLogLikelihood(dataX, dataY))
        xlb = np.linspace(self.lambdaMin_, self.lambdaMax_, num=500)
        lambdax = [logLikelihood([x])[0] for x in xlb]
        algo = ot.TNC(logLikelihood)
        algo.setStartingPoint([xlb[np.array(lambdax).argmax()]])
        algo.setBoundConstraints(ot.Interval(self.lambdaMin_, self.lambdaMax_))
        algo.setOptimizationProblem(ot.BoundConstrainedAlgorithmImplementationResult.MAXIMIZATION)
        algo.run()
        optimalLambda = algo.getResult().getOptimizer()[0]

        # graph
        optimalLogLikelihood = algo.getResult().getOptimalValue()
        graph = logLikelihood.draw(0.01 * optimalLambda, 10.0 * optimalLambda)
        c = ot.Cloud([[optimalLambda, optimalLogLikelihood]])
        c.setColor("red")
        c.setPointStyle("circle")
        graph.add(c)
        return ot.BoxCoxTransform([optimalLambda]), graph
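The `ReducedLogLikelihood` helper used above is not shown; a hedged sketch of what such a callable might compute, using the standard Box-Cox profile log-likelihood (the class name matches the call above, but the body is an assumption, not the original implementation):

import numpy as np
import openturns as ot

class ReducedLogLikelihood(ot.OpenTURNSPythonFunction):
    # Sketch: profile log-likelihood of the Box-Cox transform of dataY
    # (dataY assumed positive); mu and sigma are profiled out analytically.
    def __init__(self, dataX, dataY):
        super(ReducedLogLikelihood, self).__init__(1, 1)
        self.y_ = np.asarray(dataY).ravel()
        self.logY_ = np.log(self.y_)

    def _exec(self, lam):
        lam = lam[0]
        z = self.logY_ if abs(lam) < 1e-8 else (self.y_ ** lam - 1.0) / lam
        n = len(self.y_)
        return [-0.5 * n * np.log(z.var()) + (lam - 1.0) * self.logY_.sum()]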
Example #3
globalErrorCovariance = ot.CovarianceMatrix(2 * m)
for i in range(2 * m):
    globalErrorCovariance[i, i] = 2.0 + (1.0 + i) * (1.0 + i)
    for j in range(i):
        globalErrorCovariance[i, j] = 1.0 / (1.0 + i + j)
bootstrapSizes = [0, 100]
for bootstrapSize in bootstrapSizes:
    algo = ot.GaussianNonLinearCalibration(modelX, x, y, candidate,
                                           priorCovariance, errorCovariance)
    algo.setBootstrapSize(bootstrapSize)
    algo.run()
    # To avoid discrepancies between platforms with or without CMinpack
    print("result   (Auto)=", algo.getResult().getParameterMAP())
    algo.setOptimizationAlgorithm(
        ot.MultiStart(
            ot.TNC(),
            ot.LowDiscrepancyExperiment(
                ot.SobolSequence(),
                ot.Normal(
                    candidate,
                    ot.CovarianceMatrix(ot.Point(candidate).getDimension())),
                ot.ResourceMap.GetAsUnsignedInteger(
                    "GaussianNonLinearCalibration-MultiStartSize")).generate())
    )
    algo.run()
    # To avoid discrepancies between platforms with or without CMinpack
    print("result    (TNC)=", algo.getResult().getParameterMAP())
    algo = ot.GaussianNonLinearCalibration(modelX, x, y, candidate,
                                           priorCovariance,
                                           globalErrorCovariance)
    algo.setBootstrapSize(bootstrapSize)
Example #4
# 2. covariance model
cov = ot.MaternModel([1.], [2.5], 1.5)
print(cov)

# 3. kriging algorithm
algokriging = ot.KrigingAlgorithm(x, y, cov, basis)

## error measure
#algokriging.setNoise([5*1e-1]*n_pt)

# 4. Optimization
# algokriging.setOptimizationAlgorithm(ot.NLopt('GN_DIRECT'))
lhsExperiment = ot.LHSExperiment(ot.Uniform(1e-1, 1e2), 50)
algokriging.setOptimizationAlgorithm(
    ot.MultiStart(ot.TNC(), lhsExperiment.generate()))
algokriging.setOptimizationBounds(ot.Interval([0.1], [1e2]))

# if we choose not to optimize parameters
#algokriging.setOptimizeParameters(False)

# 5. run the algorithm
algokriging.run()

# %%
# Results
# -------

# %%
# get some results
krigingResult = algokriging.getResult()
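One can then, for instance, inspect the optimized covariance parameters and evaluate the metamodel (a short sketch continuing the snippet above):

print(krigingResult.getCovarianceModel())  # scale/amplitude retained by MultiStart + TNC
metamodel = krigingResult.getMetaModel()
print(metamodel([[0.55]]))  # prediction at a new input point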
Example #5
    def _estimKrigingTheta(self, algoKriging, lowerBound, upperBound, size):
        """
        Estimate the kriging theta values with an initial random search using
        a Sobol sequence of size samples.
        """
        # get input parameters of the kriging algorithm
        X = algoKriging.getInputSample()
        Y = algoKriging.getOutputSample()

        algoKriging.run()
        krigingResult = algoKriging.getResult()
        covarianceModel = krigingResult.getCovarianceModel()
        basis = krigingResult.getBasisCollection()
        if LooseVersion(ot.__version__) == '1.9':
            llf = algoKriging.getReducedLogLikelihoodFunction()
        else:
            llf = algoKriging.getLogLikelihoodFunction()

        # create uniform distribution of the parameters bounds
        dim = len(lowerBound)
        distBoundCol = []
        for i in range(dim):
            distBoundCol += [ot.Uniform(lowerBound[i], upperBound[i])]
        distBound = ot.ComposedDistribution(distBoundCol)

        if size > 0:
            # Generate starting points with a low discrepancy sequence
            thetaStart = ot.LowDiscrepancyExperiment(ot.SobolSequence(),
                                                     distBound,
                                                     size).generate()
            # Get the best theta from the maximum llf value
            llfValue = llf(thetaStart)
            indexMax = int(np.argmax(llfValue))
            bestTheta = thetaStart[indexMax]

            # update theta after random search
            if LooseVersion(ot.__version__) == '1.6':
                covarianceModel.setScale(bestTheta)
            elif LooseVersion(ot.__version__) > '1.6':
                # optimize theta and sigma in ot 1.8
                covarianceModel.setScale(bestTheta[:-1])
                covarianceModel.setAmplitude([bestTheta[-1]])

        # Now the KrigingAlgorithm is used to optimize the likelihood using a
        # good starting point
        if LooseVersion(ot.__version__) == "1.9":
            algoKriging = ot.KrigingAlgorithm(X, Y, covarianceModel, basis)
        else:
            algoKriging = ot.KrigingAlgorithm(X, Y, basis, covarianceModel,
                                              True)

        # set TNC optim
        searchInterval = ot.Interval(lowerBound, upperBound)
        if LooseVersion(ot.__version__) == '1.6':
            optimizer = ot.TNC()
            optimizer.setBoundConstraints(searchInterval)
            algoKriging.setOptimizer(optimizer)
        elif LooseVersion(ot.__version__) in ['1.7', '1.8']:
            optimizer = algoKriging.getOptimizationSolver()
            problem = optimizer.getProblem()
            problem.setBounds(searchInterval)
            optimizer.setProblem(problem)
            algoKriging.setOptimizationSolver(optimizer)
        elif LooseVersion(ot.__version__) == '1.9':
            algoKriging.setOptimizationBounds(searchInterval)

        return algoKriging
Example #6
import openturns as ot
import openturns.testing as ott

ot.TESTPREAMBLE()
# ot.Log.Show(ot.Log.ALL)

dim = 2

# problem
model = ot.SymbolicFunction(['x', 'y'], [
    '3*(1-x)^2*exp(-x^2-(y+1)^2)-10*(x/5-x^3-y^5)*exp(-x^2-y^2)-exp(-(x+1)^2-y^2)/3'
])
bounds = ot.Interval([-3.0] * dim, [3.0] * dim)
problem = ot.OptimizationProblem(model)
problem.setBounds(bounds)

# solver
solver = ot.TNC(problem)

# run locally
solver.setStartingPoint([0.0] * dim)
algo = solver
algo.run()
result = algo.getResult()
local_optimal_point = [0.296446, 0.320196]
local_optimal_value = [-0.0649359]
ott.assert_almost_equal(result.getOptimalPoint(), local_optimal_point, 1e-5,
                        0.0)
ott.assert_almost_equal(result.getOptimalValue(), local_optimal_value, 1e-5,
                        0.0)

# multistart
lower_bound = bounds.getLowerBound()
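The example is truncated here; a hedged sketch of how the multistart phase might continue (LHS starting points uniform over the bounds; not necessarily the original test's exact code):

upper_bound = bounds.getUpperBound()
dist = ot.ComposedDistribution(
    [ot.Uniform(lower_bound[i], upper_bound[i]) for i in range(dim)])
starting_sample = ot.LHSExperiment(dist, 25).generate()
multistart = ot.MultiStart(solver, starting_sample)  # reuses the TNC solver and its problem
multistart.run()
print(multistart.getResult().getOptimalPoint())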
Example #7
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

# linear
levelFunction = ot.NumericalMathFunction(["x1", "x2", "x3", "x4"], ["y1"],
                                         ["x1+2*x2-3*x3+4*x4"])
startingPoint = ot.NumericalPoint(4, 0.0)
bounds = ot.Interval(ot.NumericalPoint(4, -3.0), ot.NumericalPoint(4, 5.0))
algo = ot.TNC()
algo.setStartingPoint(startingPoint)

problem = ot.OptimizationProblem()
problem.setBounds(bounds)
problem.setObjective(levelFunction)
problem.setMinimization(True)

algo.setProblem(problem)
print('algo=', algo)
algo.run()
result = algo.getResult()
print('result=', result)

problem.setMinimization(False)
algo.setProblem(problem)
print('algo=', algo)
algo.run()
result = algo.getResult()
print('result=', result)
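Since the objective is linear, both optima sit at vertices of the bounds: minimization yields x = (-3, -3, 5, -3) with y1 = -36, while maximization yields x = (5, 5, -3, 5) with y1 = 44.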
Example #8
print(basis)

# 2. covariance model
cov = ot.MaternModel([1.], [2.5], 1.5)
print(cov)

# 3. kriging algorithm
algokriging = ot.KrigingAlgorithm(x, y, cov, basis)

## error measure
#algokriging.setNoise([5*1e-1]*n_pt)

# 4. Optimization
# algokriging.setOptimizationAlgorithm(ot.NLopt('GN_DIRECT'))
startingPoint = ot.LHSExperiment(ot.Uniform(1e-1, 1e2), 50).generate()
algokriging.setOptimizationAlgorithm(ot.MultiStart(ot.TNC(), startingPoint))
algokriging.setOptimizationBounds(ot.Interval([0.1], [1e2]))

# if we choose not to optimize parameters
#algokriging.setOptimizeParameters(False)

# 5. run the algorithm
algokriging.run()

# %%
# Results
# -------

# %%
# get some results
krigingResult = algokriging.getResult()
Example #9
result = algo.getResult()
# print('1st pass result=', result)
print('iteration=', result.getIterationNumber())
assert 3 < result.getIterationNumber() < 15, 'Too few/too many iterations'
print(result.getInputSample())
print(result.getOutputSample())

# openturns.testing.assert_almost_equal(result.getOptimalPoint(), [0.5, 0.0], 1e-5, 1e-5)
# openturns.testing.assert_almost_equal(result.getOptimalValue(),
# [-0.802223], 1e-5, 1e-5)

# local refinement even though the model is noisy (we still want to check
# that we're not too far from the optimum)
problem.setObjective(model.getMarginal(0))
algo2 = ot.TNC(problem)
# we have to use getFinalPoints as our objective function is 2-d
algo2.setStartingPoint(result.getOptimalPoint())
algo2.run()
result = algo2.getResult()
# print(result)
# openturns.testing.assert_almost_equal(result.getOptimalPoint(), [0.542773, 0.151666], 1e-5, 1e-5)
# openturns.testing.assert_almost_equal(result.getOptimalPoint(),
# [0.123895, 0.818329], 1e-5, 1e-5)
openturns.testing.assert_almost_equal(
    result.getOptimalPoint(), [0.961652, 0.165000], 1e-5, 1e-5)
openturns.testing.assert_almost_equal(
    result.getOptimalValue(), [-0.979476], 1e-5, 1e-5)


#
Example #10
import math as m
import sys

import openturns as ot

ot.TESTPREAMBLE()
ot.PlatformInfo.SetNumericalPrecision(3)

m = 10  # number of observations (note: this rebinds the 'math as m' import)
x = [[0.5 + i] for i in range(m)]

#ot.ResourceMap.SetAsUnsignedInteger( "OptimizationAlgorithm-DefaultMaximumEvaluationNumber", 100)
inVars = ["a", "b", "c", "x"]
formulas = ["a + b * exp(c * x)", "(a * x^2 + b) / (c + x^2)"]
model = ot.SymbolicFunction(inVars, formulas)
p_ref = [2.8, 1.2, 0.5]
params = [0, 1, 2]
modelX = ot.ParametricFunction(model, params, p_ref)
y = modelX(x)
y += ot.Normal([0.0]*2, [0.05]*2, ot.IdentityMatrix(2)).getSample(m)
candidate = [1.0]*3
bootstrapSizes = [0, 100]
for bootstrapSize in bootstrapSizes:
    algo = ot.NonLinearLeastSquaresCalibration(modelX, x, y, candidate)
    algo.setBootstrapSize(bootstrapSize)
    algo.run()
    # To avoid discrepancies between platforms with or without CMinpack
    print("result (Auto)=", algo.getResult().getParameterMAP())
    algo.setAlgorithm(
        ot.MultiStart(
            ot.TNC(),
            ot.LowDiscrepancyExperiment(
                ot.SobolSequence(),
                ot.Normal(
                    candidate,
                    ot.CovarianceMatrix(ot.Point(candidate).getDimension())),
                ot.ResourceMap.GetAsUnsignedInteger(
                    "NonLinearLeastSquaresCalibration-MultiStartSize")).generate()))
    algo.run()
    # To avoid discrepancies between platforms with or without CMinpack
    print("result  (TNC)=", algo.getResult().getParameterMAP())
Example #11
# %%
# Create the model
model = ot.SymbolicFunction(['E', 'F', 'L', 'I'], ['F*L^3/(3*E*I)'])

# %%
# Define the problems
minProblem = ot.OptimizationProblem(model)
minProblem.setBounds(bounds)

maxProblem = ot.OptimizationProblem(model)
maxProblem.setBounds(bounds)
maxProblem.setMinimization(False)

# %%
# Create a solver
solver = ot.TNC()
solver.setStartingPoint(distribution.getMean())

# %%
# Solve the problems
solver.setProblem(minProblem)
solver.run()
minResult = solver.getResult()
print('min: y=', minResult.getOptimalValue(), 'with x=',
      minResult.getOptimalPoint())

solver.setProblem(maxProblem)
solver.run()
maxResult = solver.getResult()
print('max: y=', maxResult.getOptimalValue(), 'with x=',
      maxResult.getOptimalPoint())
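This snippet assumes `bounds` and `distribution` were defined earlier; a minimal hypothetical setup (illustrative values only, not the original study's data) could be:

distribution = ot.ComposedDistribution([
    ot.Normal(3.0e7, 3.0e6),  # E: Young's modulus
    ot.Normal(300.0, 30.0),   # F: load
    ot.Normal(250.0, 5.0),    # L: length
    ot.Normal(400.0, 40.0),   # I: inertia
])
bounds = distribution.getRange()  # the distribution's numerical range, as an Interval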
Example #12
lowerBound = ot.NumericalPoint((-1.0, 1.0e-4))
upperBound = ot.NumericalPoint((3.0, 2.0))
finiteLowerBound = ot.BoolCollection((0, 1))
finiteUpperBound = ot.BoolCollection((0, 0))
bounds = ot.Interval(lowerBound, upperBound, finiteLowerBound,
                     finiteUpperBound)

# Create the starting point of the search
# For mu: the first data point
# For sigma: a value estimated from the first two data points
startingPoint = ot.NumericalPoint(2)
startingPoint[0] = sample[0][0]
startingPoint[1] = m.sqrt(
    (sample[1][0] - sample[0][0]) * (sample[1][0] - sample[0][0]))

# Create the optimization problem
problem = ot.OptimizationProblem(myLogLikelihoodOT, ot.NumericalMathFunction(),
                                 ot.NumericalMathFunction(), bounds)
problem.setMinimization(False)

# Create the TNC algorithm
myAlgoTNC = ot.TNC(problem)
myAlgoTNC.setStartingPoint(startingPoint)

# Run the algorithm and extract results
myAlgoTNC.run()
resMLE = myAlgoTNC.getResult()
MLEparameters = resMLE.getOptimalPoint()
print("MLE of (mu, sigma) = (", MLEparameters[0], ", ", MLEparameters[1], ")")