Example #1
    def _estimKrigingTheta(self, algoKriging, lowerBound, upperBound, size):
        """
        Estimate the kriging theta values with an initial multi-start search
        using a (quasi-random) Sobol' sequence of `size` points.
        """

        if size > 0:
            # create a uniform distribution over the parameter bounds
            dim = len(lowerBound)
            distBoundCol = []
            for i in range(dim):
                distBoundCol += [ot.Uniform(lowerBound[i], upperBound[i])]
            distBound = ot.ComposedDistribution(distBoundCol)

            # set the bounds
            searchInterval = ot.Interval(lowerBound, upperBound)
            algoKriging.setOptimizationBounds(searchInterval)
            # Generate starting points with a low discrepancy sequence
            startingPoint = ot.LowDiscrepancyExperiment(
                ot.SobolSequence(), distBound, size).generate()

            algoKriging.setOptimizationAlgorithm(
                ot.MultiStart(ot.TNC(), startingPoint))
        else:
            algoKriging.setOptimizeParameters(False)

        return algoKriging
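# Usage sketch for the helper above (illustrative, not part of the excerpt):
# the training data ``x``/``y``, the basis, the covariance model and the bound
# values are assumptions, and ``self`` refers to the owning class.
basis = ot.ConstantBasisFactory(1).build()
cov = ot.SquaredExponential([1.0])
algo = ot.KrigingAlgorithm(x, y, cov, basis)
algo = self._estimKrigingTheta(algo, [0.01], [100.0], 16)  # 16 Sobol' starts
algo.run()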
# Create the problem and set the optimization algorithm
# -----------------------------------------------------

# %%
problem = ot.OptimizationProblem(rastrigin)

# %%
# We use the :class:`~openturns.Cobyla` algorithm and run it from multiple starting points selected by a :class:`~openturns.LowDiscrepancyExperiment`.

# %%
size = 64
distribution = ot.ComposedDistribution(
    [ot.Uniform(lowerbound[0], upperbound[0])] * dim)
experiment = ot.LowDiscrepancyExperiment(
    ot.SobolSequence(), distribution, size)
solver = ot.MultiStart(ot.Cobyla(problem), experiment.generate())

# %%
# Visualize the starting points of the optimization algorithm
# -----------------------------------------------------------

# %%
startingPoints = solver.getStartingSample()
graph = rastrigin.draw(lowerbound, upperbound, [100]*dim)
graph.setTitle("Rastrigin function")
cloud = ot.Cloud(startingPoints)
cloud.setPointStyle("bullet")
cloud.setColor("black")
graph.add(cloud)
graph.setLegends([""])
# sphinx_gallery_thumbnail_number = 2
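# %%
# Continuation sketch (not part of the excerpt): display the figure and run
# the multi-start solver; this assumes ``openturns.viewer`` is available.

# %%
from openturns.viewer import View
View(graph)
solver.run()
result = solver.getResult()
print("x*=", result.getOptimalPoint(), "f(x*)=", result.getOptimalValue())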
# (The beginning of this snippet was truncated: the construction of
# ``globalErrorCovariance`` and the enclosing loop over ``i`` are restored
# minimally here so the fragment parses; the dimension ``2 * m`` is assumed
# by analogy with the flattened observation sample.)
globalErrorCovariance = ot.CovarianceMatrix(2 * m)
for i in range(2 * m):
    for j in range(i):
        globalErrorCovariance[i, j] = 1.0 / (1.0 + i + j)
bootstrapSizes = [0, 100]
for bootstrapSize in bootstrapSizes:
    algo = ot.GaussianNonLinearCalibration(modelX, x, y, candidate,
                                           priorCovariance, errorCovariance)
    algo.setBootstrapSize(bootstrapSize)
    algo.run()
    # To avoid discrepancies between platforms with or without CMinpack
    print("result   (Auto)=", algo.getResult().getParameterMAP())
    algo.setOptimizationAlgorithm(
        ot.MultiStart(
            ot.TNC(),
            ot.LowDiscrepancyExperiment(
                ot.SobolSequence(),
                ot.Normal(
                    candidate,
                    ot.CovarianceMatrix(ot.Point(candidate).getDimension())),
                ot.ResourceMap.GetAsUnsignedInteger(
                    "GaussianNonLinearCalibration-MultiStartSize")).generate())
    )
    algo.run()
    # To avoid discrepancies between platforms with or without CMinpack
    print("result    (TNC)=", algo.getResult().getParameterMAP())
    algo = ot.GaussianNonLinearCalibration(modelX, x, y, candidate,
                                           priorCovariance,
                                           globalErrorCovariance)
    algo.setBootstrapSize(bootstrapSize)
    algo.run()
    print("result (Global)=", algo.getResult().getParameterMAP())
# 2. covariance model
cov = ot.MaternModel([1.], [2.5], 1.5)
print(cov)

# 3. kriging algorithm
algokriging = ot.KrigingAlgorithm(x, y, cov, basis)

# error measure
# algokriging.setNoise([5*1e-1]*n_pt)

# 4. Optimization
# algokriging.setOptimizationAlgorithm(ot.NLopt('GN_DIRECT'))
lhsExperiment = ot.LHSExperiment(ot.Uniform(1e-1, 1e2), 50)
algokriging.setOptimizationAlgorithm(
    ot.MultiStart(ot.TNC(), lhsExperiment.generate()))
algokriging.setOptimizationBounds(ot.Interval([0.1], [1e2]))

# if we choose not to optimize parameters
# algokriging.setOptimizeParameters(False)

# 5. run the algorithm
algokriging.run()

# %%
# Results
# -------

# %%
# get some results
krigingResult = algokriging.getResult()
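# %%
# Typical continuation (sketch, not in the excerpt): extract the metamodel and
# the optimized covariance parameters from the result.

# %%
metamodel = krigingResult.getMetaModel()
print(krigingResult.getCovarianceModel())
print(krigingResult.getTrendCoefficients())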
Example #5
ott.assert_almost_equal(result.getOptimalPoint(), local_optimal_point, 1e-5,
                        0.0)
ott.assert_almost_equal(result.getOptimalValue(), local_optimal_value, 1e-5,
                        0.0)

# multistart
lower_bound = bounds.getLowerBound()
upper_bound = bounds.getUpperBound()
dist_collection = []
for i in range(dim):
    dist_collection.append(ot.Uniform(lower_bound[i], upper_bound[i]))
distribution = ot.ComposedDistribution(dist_collection)
size = 20
experiment = ot.LHSExperiment(distribution, size)
startingSample = experiment.generate()
algo = ot.MultiStart(solver, startingSample)
algo.setMaximumEvaluationNumber(2000)
algo.run()
result = algo.getResult()
true_optimal_point = [0.228279, -1.62553]
true_optimal_value = [-6.55113]
ott.assert_almost_equal(result.getOptimalPoint(), true_optimal_point, 1e-5,
                        0.0)
ott.assert_almost_equal(result.getOptimalValue(), true_optimal_value, 1e-5,
                        0.0)
n_local_results = algo.getResultCollection().getSize()
assert n_local_results == 13, "n_local_results is wrong"
algo.setKeepResults(False)
algo.run()
n_local_results_nokeep = algo.getResultCollection().getSize()
assert n_local_results_nokeep == 0, "n_local_results_nokeep is wrong"
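# Sketch (not in the excerpt): with result keeping re-enabled, each local
# optimization can be inspected individually.
algo.setKeepResults(True)
algo.run()
for local_result in algo.getResultCollection():
    print(local_result.getOptimalPoint(), local_result.getOptimalValue())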
# %%
# We can check that the minimum and maximum of the sample correspond to the bounds of the design of experiments.

# %%
lbounds, ubounds

# %%
starting_points.getMin(), starting_points.getMax()

# %%
# Then we create a `MultiStart` algorithm based on the LHS starting points.

# %%
solver.setMaximumIterationNumber(10000)
multiStartSolver = ot.MultiStart(solver, starting_points)

# %%
# Finally, we configure the kriging algorithm to use the `MultiStart` solver.

# %%
algo = ot.KrigingAlgorithm(X_train, Y_train, covarianceModel, basis)
algo.setOptimizationAlgorithm(multiStartSolver)
algo.run()

# %%
result = algo.getResult()
finetune_covariance_model = result.getCovarianceModel()
finetune_covariance_model

# %%
printCovarianceParameterChange(finetune_covariance_model, basic_covariance_model)
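# %%
# The helper ``printCovarianceParameterChange`` is defined outside this
# excerpt; what follows is a hypothetical sketch of such a helper, not the
# original definition.

# %%
def printCovarianceParameterChange(covarianceModelA, covarianceModelB):
    # Hypothetical reconstruction: compare the full parameter vectors.
    parametersA = covarianceModelA.getFullParameter()
    parametersB = covarianceModelB.getFullParameter()
    for i in range(parametersA.getDimension()):
        print("Change of parameter #%d = %s"
              % (i, abs(parametersA[i] - parametersB[i])))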
Example #8
bounds = ot.Interval([-3.0] * dim, [3.0] * dim)
problem = ot.OptimizationProblem(model)
problem.setBounds(bounds)

# solver
solver = ot.TNC(problem)

# run locally
solver.setStartingPoint([0.0] * dim)
algo = solver
algo.run()
result = algo.getResult()
print('-- local search x*=', result.getOptimalPoint(), 'f(x*)=',
      result.getOptimalValue())

# multistart
distribution = ot.Normal([0.0] * dim, [2.0] * dim, ot.CorrelationMatrix(dim))
size = 20
experiment = ot.LHSExperiment(distribution, size)
startingPoints = experiment.generate()
algo = ot.MultiStart(solver, startingPoints)
algo.setMaximumEvaluationNumber(2000)
algo.run()
result = algo.getResult()
print('-- multistart x*=', result.getOptimalPoint(), 'f(x*)=',
      result.getOptimalValue())
print('n_local_results=', len(algo.getResultCollection()))
algo.setKeepResults(False)
algo.run()
print('n_local_results (no keep)=', len(algo.getResultCollection()))
Example #9
import openturns as ot

ot.TESTPREAMBLE()
ot.PlatformInfo.SetNumericalPrecision(3)

m = 10
x = [[0.5 + i] for i in range(m)]

#ot.ResourceMap.SetAsUnsignedInteger( "OptimizationAlgorithm-DefaultMaximumEvaluationNumber", 100)
inVars = ["a", "b", "c", "x"]
formulas = ["a + b * exp(c * x)", "(a * x^2 + b) / (c + x^2)"]
model = ot.SymbolicFunction(inVars, formulas)
p_ref = [2.8, 1.2, 0.5]
params = [0, 1, 2]
modelX = ot.ParametricFunction(model, params, p_ref)
y = modelX(x)
y += ot.Normal([0.0]*2, [0.05]*2, ot.IdentityMatrix(2)).getSample(m)
candidate = [1.0]*3
bootstrapSizes = [0, 100]
for bootstrapSize in bootstrapSizes:
    algo = ot.NonLinearLeastSquaresCalibration(modelX, x, y, candidate)
    algo.setBootstrapSize(bootstrapSize)
    algo.run()
    # To avoid discrepancies between platforms with or without CMinpack
    print("result (Auto)=", algo.getResult().getParameterMAP())
    algo.setOptimizationAlgorithm(
        ot.MultiStart(
            ot.TNC(),
            ot.LowDiscrepancyExperiment(
                ot.SobolSequence(),
                ot.Normal(
                    candidate,
                    ot.CovarianceMatrix(ot.Point(candidate).getDimension())),
                ot.ResourceMap.GetAsUnsignedInteger(
                    "NonLinearLeastSquaresCalibration-MultiStartSize")).generate()))
    algo.run()
    # To avoid discrepancies between platforms with or without CMinpack
    print("result  (TNC)=", algo.getResult().getParameterMAP())