Example #1
    def optimizeLambda(self, marginal, deltaRHS):
        """
        Compute the lambda values

        Parameters
        ----------
        marginal : int
            The indice of the perturbed marginal.
        deltaRHS : sequence of float of dim 2
            The values of the mean and variance + mean^2
        """

        # define the optimization function, which is the Lagrange function,
        # using its gradient and Hessian
        optimFunc = ot.PythonFunction(2, 1, lambda lamb: [self.H(marginal, lamb, deltaRHS)],
                            gradient=lambda lamb: self.gradH(marginal, lamb, deltaRHS),
                            hessian=lambda lamb: self.hessianH(marginal, lamb))

        # define the optimization problem
        optimPb = ot.OptimizationProblem(optimFunc,
                                         ot.NumericalMathFunction(),
                                         ot.NumericalMathFunction(),
                                         ot.Interval())

        # solve the problem using SLSQP from NLopt
        optim = ot.NLopt(optimPb, 'LD_SLSQP')
        optim.setStartingPoint([0, 0])
        optim.run()
        # return the lambda values, solution of the problem
        return optim.getResult().getOptimalPoint()
Example #2
 def test_gtapprox(self):
     input_dim = 2
     count = 10
     inputs = np.random.random((count, input_dim))
     outputs = [[x1 * x1 + x2 * x2] for (x1, x2) in inputs]
     # p7core model
     p7_model = gtapprox.Builder().build(inputs, outputs)
     # p7ot model function
     p7ot_function = ModelFunction(p7_model)
     # openturns function
     ot_function = ot.NumericalMathFunction(p7ot_function)
     # openturns optimization problem
     ot_optimization_problem = ot.OptimizationProblem()
     ot_optimization_problem.setObjective(p7ot_function)
     ot_optimization_problem.setBounds(ot.Interval([-100] * 2, [100] * 2))
     p7ot_solver = GTOpt(ot_optimization_problem)
     p7ot_solver.run()
     # openturns graphic
     input_dim = 1
     count = 10
     inputs = np.random.uniform(0, 10, [count, input_dim])
     outputs = [math.sin(x) for x in inputs]
     p7ot_function = ModelFunction(gtapprox.Builder().build(
         inputs, outputs))
     graph = p7ot_function.draw(0, 10, 100)
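     # A small sketch, assuming the p7ot accessors used in the other tests:
     # inspect the optimization result as well
     result = p7ot_solver.getResult()
     print(result.getOptimalPoint(), result.getOptimalValue())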
Example #3
    def test_Multiobjective(self):
        dim = 4
        bounds = ot.Interval([-500] * dim, [500] * dim)
        inputs = ['x' + str(i) for i in range(dim)]
        # Rosenbrock function
        formulas = ['']
        for i in range(dim - 1):
            formulas[0] += '(1-x%d)^2+100*(x%d-x%d^2)^2%s' % (
                i, i + 1, i, '' if i == dim - 2 else '+')
        rosenbrock = ot.NumericalMathFunction(inputs, formulas)
        # Sphere function
        formulas = ['']
        for i in range(dim):
            formulas[0] += 'x%d^2%s' % (i, '' if i == dim - 1 else '+')
        sphere = ot.NumericalMathFunction(inputs, formulas)
        # Objective function
        objective = ot.NumericalMathFunction([rosenbrock, sphere])
        # Define optimization problem
        problem = ot.OptimizationProblem()
        problem.setObjective(objective)
        problem.setBounds(bounds)
        # Prepare solver
        solver = GTOpt(problem)
        solver.setStartingPoint(bounds.getUpperBound())
        # Run optimization and get result
        solver.run()
        result = solver.getResult()
        # Asserts
        outputs_dim = objective.getOutputDimension()
        self.assertEqual(result.getOptimalValue().getDimension(), outputs_dim)
        self.assertEqual(dim + outputs_dim,
                         solver.getP7History().getDimension())
        self.assertEqual(result.getIterationNumber(),
                         solver.getP7History().getSize())

        # Compare the results with those of p7core gtopt

        class Problem(gtopt.ProblemGeneric):
            def prepare_problem(self):
                p7_bounds = list(
                    zip(bounds.getLowerBound(), bounds.getUpperBound()))
                for j in range(dim):
                    self.add_variable(
                        bounds=p7_bounds[j],
                        initial_guess=solver.getStartingPoint()[j])
                self.add_objective()
                self.add_objective()

            def evaluate(self, queryx, querymask):
                functions_batch = []
                output_masks_batch = []
                for x, mask in zip(queryx, querymask):
                    functions_batch.append(list(objective(x)))
                    output_masks_batch.append(mask)
                return functions_batch, output_masks_batch

        self.compare_results(p7ot_result=solver.getP7Result(),
                             problem=Problem())
Example #4
    def test_Maximization(self):
        dim = 2
        bounds = ot.Interval([-500] * dim, [500] * dim)
        # Objective function
        objective = ot.NumericalMathFunction(
            ['x1', 'x2'], ['- (x1+2*x2-7)^2 - (2*x1+x2-5)^2 + 1'])
        # Define optimization problem
        problem = ot.OptimizationProblem()
        problem.setObjective(objective)
        problem.setBounds(bounds)
        # Maximization problem
        problem.setMinimization(False)
        # Prepare solver
        solver = GTOpt(problem)
        solver.setStartingPoint(bounds.getUpperBound())
        # Run optimization and get result
        solver.run()
        result = solver.getResult()
        # Asserts
        outputs_dim = objective.getOutputDimension()
        self.assertEqual(result.getOptimalValue().getDimension(), outputs_dim)
        self.assertEqual(dim + outputs_dim,
                         solver.getP7History().getDimension())
        self.assertEqual(result.getIterationNumber(),
                         solver.getP7History().getSize())

        # Compare the results with those of p7core gtopt

        class Problem(gtopt.ProblemGeneric):
            def prepare_problem(self):
                p7_bounds = list(
                    zip(bounds.getLowerBound(), bounds.getUpperBound()))
                self.add_variable(bounds=p7_bounds[0],
                                  initial_guess=solver.getStartingPoint()[0])
                self.add_variable(bounds=p7_bounds[1],
                                  initial_guess=solver.getStartingPoint()[1])
                self.add_objective()

            def evaluate(self, queryx, querymask):
                functions_batch = []
                output_masks_batch = []
                for x, mask in zip(queryx, querymask):
                    functions_batch.append(
                        [-1 * value for value in objective(x)])
                    output_masks_batch.append(mask)
                return functions_batch, output_masks_batch

        self.compare_results(p7ot_result=solver.getP7Result(),
                             problem=Problem(),
                             maximization=True)
Example #5
    def test_gtopt(self):
        # Schaffer function
        objective = ot.NumericalMathFunction(
            ['x', 'y'],
            ['0.5 + ((sin(x^2-y^2))^2-0.5)/((1+0.001*(x^2+y^2))^2)'])
        lb = [-10] * objective.getInputDimension()
        ub = [10] * objective.getInputDimension()
        # Solve by p7ot.GTOpt
        problem = ot.OptimizationProblem()
        problem.setObjective(objective)
        problem.setBounds(ot.Interval(lb, ub))
        solver = GTOpt(problem)
        solver.setStartingPoint(ub)
        solver.run()
        ot_result = solver.getResult()

        # Solve by p7core.gtopt.Solver

        class Problem(gtopt.ProblemGeneric):
            def prepare_problem(self):
                for i in range(objective.getInputDimension()):
                    self.add_variable(bounds=(lb[i], ub[i]),
                                      initial_guess=ub[i])
                self.add_objective()

            def evaluate(self, queryx, querymask):
                functions_batch = []
                output_masks_batch = []
                for x, mask in zip(queryx, querymask):
                    functions_batch.append(objective(x))
                    output_masks_batch.append(mask)
                return functions_batch, output_masks_batch

        p7_problem = Problem()
        p7_result = gtopt.Solver().solve(problem=p7_problem)
        # Compare results
        self.assertEqual(ot_result.getOptimalValue(), p7_result.optimal.f[0])
        self.assertEqual(ot_result.getOptimalPoint(), p7_result.optimal.x[0])
        self.assertEqual(ot_result.getIterationNumber(),
                         len(p7_problem.history))
Example #6
#! /usr/bin/env python

import openturns as ot
import openturns.testing as ott

ot.TESTPREAMBLE()
# ot.Log.Show(ot.Log.ALL)

dim = 2

# problem
model = ot.SymbolicFunction(['x', 'y'], [
    '3*(1-x)^2*exp(-x^2-(y+1)^2)-10*(x/5-x^3-y^5)*exp(-x^2-y^2)-exp(-(x+1)^2-y^2)/3'
])
bounds = ot.Interval([-3.0] * dim, [3.0] * dim)
problem = ot.OptimizationProblem(model)
problem.setBounds(bounds)

# solver
solver = ot.TNC(problem)

# run locally
solver.setStartingPoint([0.0] * dim)
algo = solver
algo.run()
result = algo.getResult()
local_optimal_point = [0.296446, 0.320196]
local_optimal_value = [-0.0649359]
ott.assert_almost_equal(result.getOptimalPoint(), local_optimal_point, 1e-5,
                        0.0)
ott.assert_almost_equal(result.getOptimalValue(), local_optimal_value, 1e-5,
                        0.0)
Example #7
for algo in ot.Dlib.GetAlgorithmNames():
    print(algo)

# %%
# More details on dlib algorithms are available `here <http://dlib.net/optimization.html>`_ .

# %%
# Solving an unconstrained problem with conjugate gradient algorithm
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The following example demonstrates the use of the dlib conjugate gradient algorithm to find the minimum of the `Rosenbrock function <https://en.wikipedia.org/wiki/Rosenbrock_function>`_. The optimal point can be computed analytically: it is [1.0, 1.0].

# %%
# Define the problem based on the Rosenbrock function
rosenbrock = ot.SymbolicFunction(['x1', 'x2'], ['(1-x1)^2+(x2-x1^2)^2'])
problem = ot.OptimizationProblem(rosenbrock)

# %%
# The optimization algorithm is instantiated from the problem to solve and the name of the algorithm
algo = ot.Dlib(problem, 'cg')
print("Dlib algorithm, type ", algo.getAlgorithmName())
print("Maximum iteration number: ", algo.getMaximumIterationNumber())
print("Maximum evaluation number: ", algo.getMaximumEvaluationNumber())
print("Maximum absolute error: ", algo.getMaximumAbsoluteError())
print("Maximum relative error: ", algo.getMaximumRelativeError())
print("Maximum residual error: ", algo.getMaximumResidualError())
print("Maximum constraint error: ", algo.getMaximumConstraintError())

# %%
# When using conjugate gradient, BFGS/LBFGS, Newton, least squares or trust region methods, optimization proceeds until one of the following criteria is met:
#
# - the maximum number of iterations or function evaluations is reached;
# - the absolute, relative or residual error falls below its maximum value.
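# %%
# A minimal run sketch, assuming the `algo` defined above: set a starting
# point, run the solver and compare with the known optimum [1.0, 1.0].
algo.setMaximumIterationNumber(1000)
algo.setStartingPoint([0.0] * 2)
algo.run()
result = algo.getResult()
print("Optimal point:", result.getOptimalPoint())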
Example #8
    print("      -- Optimal point = ", result.getOptimalPoint())
    print("      -- Optimal value = ", result.getOptimalValue())
    print("      -- Iteration number = ", result.getIterationNumber())
    print("      -- Evaluation number = ", result.getEvaluationNumber())
    print("      -- Absolute error = {:.6e}".format(result.getAbsoluteError()))
    print("      -- Relative error = {:.6e}".format(result.getRelativeError()))
    print("      -- Residual error = {:.6e}".format(result.getResidualError()))
    print("      -- Constraint error = {:.6e}".format(
        result.getConstraintError()))


# Define the problems based on Rastrigin function
rastrigin = ot.SymbolicFunction(
    ['x1', 'x2'], ['20 + x1^2 - 10*cos(2*pi_*x1) + x2^2 - 10*cos(2*pi_*x2)'])

unboundedProblem = ot.OptimizationProblem(rastrigin)

notConstrainingBounds = ot.Interval([-5.0, -5.0], [3.0, 2.0])
notConstrainingBoundsProblem = ot.OptimizationProblem(rastrigin, ot.Function(),
                                                      ot.Function(),
                                                      notConstrainingBounds)

constrainingBounds = ot.Interval([-1.0, -2.0], [5.0, -0.5])
constrainingBoundsProblem = ot.OptimizationProblem(rastrigin, ot.Function(),
                                                   ot.Function(),
                                                   constrainingBounds)

boundedPref = [0.0, -1.0]
unboundedPref = [0.0, 0.0]

## GLOBAL ALGORITHM ##
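# A minimal sketch, assuming the problems defined above: dlib's global
# algorithm requires finite bounds, so it is run on a bounded problem.
algo = ot.Dlib(notConstrainingBoundsProblem, 'global')
algo.setMaximumEvaluationNumber(500)
algo.run()
print(algo.getResult().getOptimalPoint())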
Example #9
    def run(self):

        bounds = self.bounds
        if self.operator(self.S, self.S + 1):
            quantile_courant = ot.Point([self.S + 1])
        else:
            quantile_courant = ot.Point([self.S - 1])

        theta_0 = self.initial_theta
        num_iter = 0
        # main loop of adaptive importance sampling
        while self.operator(self.S, quantile_courant[0]):

            theta_courant = theta_0
            self.update_dist(theta_0)
            Sample = self.aux_distrib.getSample(
                self.n_IS)  # draw samples from the auxiliary density
            Resp_sample = self.limit_state_function(
                Sample)  # evaluate the limit state function
            quantile_courant = Resp_sample.computeQuantile(
                self.rho_quantile)  # compute the current quantile

            f_opt = lambda theta: self.obj_func(
                Sample, Resp_sample, theta, quantile_courant
            )  # objective function for the cross-entropy (CE) step

            objective = ot.PythonFunction(self.dim_theta, 1, f_opt)

            problem = ot.OptimizationProblem(
                objective
            )  # CE optimization problem over the auxiliary distribution parameters
            problem.setBounds(bounds)
            problem.setMinimization(False)

            algo_optim = ot.Dlib(problem, 'Global')
            algo_optim.setMaximumIterationNumber(50000)
            algo_optim.setStartingPoint(theta_0)
            algo_optim.run()  # run the CE optimization

            # retrieve results
            result = algo_optim.getResult()
            theta_0 = result.getOptimalPoint()
            if self.verbose:
                if num_iter == 0:
                    print('theta', '| current threshold')
                    print(theta_0, '|', quantile_courant[0])
                else:
                    print(theta_0, '|', quantile_courant[0])

            num_iter += 1

        # Estimate the failure probability
        self.update_dist(theta_courant)  # update the auxiliary density
        y = np.array([
            self.operator(Resp_sample[i][0], self.S)
            for i in range(Resp_sample.getSize())
        ])  # flag the failure points
        indices_critic = np.where(
            y)[0].tolist()  # indices of the failure samples

        Resp_sample_critic = Resp_sample.select(indices_critic)
        Sample_critic = Sample.select(indices_critic)

        pdf_init_critic = self.distrib.computePDF(
            Sample_critic)  # evaluate the initial PDF on the failure samples
        pdf_aux_critic = self.aux_distrib.computePDF(
            Sample_critic)  # evaluate the auxiliary PDF on the failure samples
        proba = 1 / self.n_IS * np.sum(
            np.array([pdf_init_critic]) /
            np.array([pdf_aux_critic]))  # failure probability estimate

        self.proba = proba
        self.samples = Sample

        # Save the data in the SimulationResult structure
        self.result.setProbabilityEstimate(proba)
        self.result.setCESamples(Sample)
        self.result.setAuxiliaryDensity(self.aux_distrib)
        return None
Example #10
marginals = [dist_E, dist_F, dist_L, dist_I]
distribution = ot.ComposedDistribution(marginals)

# %%
# Define bounds
lowerBound = [marginal.computeQuantile(0.1)[0] for marginal in marginals]
upperBound = [marginal.computeQuantile(0.9)[0] for marginal in marginals]
bounds = ot.Interval(lowerBound, upperBound)

# %%
# Create the model
model = ot.SymbolicFunction(['E', 'F', 'L', 'I'], ['F*L^3/(3*E*I)'])

# %%
# Define the problems
minProblem = ot.OptimizationProblem(model)
minProblem.setBounds(bounds)

maxProblem = ot.OptimizationProblem(model)
maxProblem.setBounds(bounds)
maxProblem.setMinimization(False)

# %%
# Create a solver
solver = ot.TNC()
solver.setStartingPoint(distribution.getMean())

# %%
# Solve the problems
solver.setProblem(minProblem)
solver.run()
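# %%
# A minimal continuation sketch, assuming the objects defined above: retrieve
# the minimum, then solve the maximization problem with the same solver.
minResult = solver.getResult()
print('min value:', minResult.getOptimalValue())
solver.setProblem(maxProblem)
solver.run()
maxResult = solver.getResult()
print('max value:', maxResult.getOptimalValue())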
Example #11
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

levelFunction = ot.NumericalMathFunction(["x1", "x2", "x3", "x4"], ["y1"],
                                         ["x1+2*x2-3*x3+4*x4"])
# Add a finite difference gradient to the function, as the Abdo-Rackwitz
# algorithm needs it
myGradient = ot.NonCenteredFiniteDifferenceGradient(
    1e-7, levelFunction.getEvaluation())
print("myGradient = ", repr(myGradient))
# Substitute the gradient
levelFunction.setGradient(ot.NonCenteredFiniteDifferenceGradient(myGradient))
startingPoint = [0.0] * 4
algo = ot.AbdoRackwitz(ot.OptimizationProblem(levelFunction, 3.0))
algo.setStartingPoint(startingPoint)
algo.run()
print("result = ", algo.getResult())

levelFunction = ot.NumericalMathFunction(["x1", "x2", "x3", "x4"], ["y1"],
                                         ["x1*cos(x1)+2*x2*x3-3*x3+4*x3*x4"])
# Add a finite difference gradient to the function, as the Abdo-Rackwitz
# algorithm needs it
myGradient = ot.NonCenteredFiniteDifferenceGradient(
    1e-7, levelFunction.getEvaluation())
print("myGradient = ", repr(myGradient))
# Substitute the gradient
levelFunction.setGradient(ot.NonCenteredFiniteDifferenceGradient(myGradient))
startingPoint = [0.0] * 4
algo = ot.AbdoRackwitz(ot.OptimizationProblem(levelFunction, -0.5))
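# A minimal completion sketch, mirroring the first problem above
algo.setStartingPoint(startingPoint)
algo.run()
print("result = ", algo.getResult())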
Example #12
import openturns as ot

ot.PlatformInfo.SetNumericalPrecision(4)

# linear
levelFunction = ot.NumericalMathFunction(
    ["x1", "x2", "x3", "x4"], ["y1"], ["x1+2*x2-3*x3+4*x4"])
# Add a finite difference gradient to the function
myGradient = ot.NonCenteredFiniteDifferenceGradient(
    1e-7, levelFunction.getEvaluation())
print("myGradient = ", repr(myGradient))
# Substitute the gradient
levelFunction.setGradient(
    ot.NonCenteredFiniteDifferenceGradient(myGradient))
startingPoint = ot.NumericalPoint(4, 0.0)
algo = ot.SQP(ot.OptimizationProblem(levelFunction, 3.0))
algo.setStartingPoint(startingPoint)
print('algo=', algo)
algo.run()
result = algo.getResult()
print('result=', result)


# non-linear
levelFunction = ot.NumericalMathFunction(
    ["x1", "x2", "x3", "x4"], ["y1"], ["x1*cos(x1)+2*x2*x3-3*x3+4*x3*x4"])
# Add a finite difference gradient to the function, as the SQP algorithm
# needs it
myGradient = ot.NonCenteredFiniteDifferenceGradient(
    1e-7, levelFunction.getEvaluation())
# Substitute the gradient
levelFunction.setGradient(
    ot.NonCenteredFiniteDifferenceGradient(myGradient))
Example #13

# branin

dim = 2

# model
branin = ot.SymbolicFunction(['x1', 'x2'], [
    '((x2-(5.1/(4*_pi^2))*x1^2+5*x1/_pi-6)^2+10*(1-1/8*_pi)*cos(x1)+10-54.8104)/51.9496',
    '0.96'
])
transfo = ot.SymbolicFunction(['u1', 'u2'], ['15*u1-5', '15*u2'])
model = ot.ComposedFunction(branin, transfo)

# problem
problem = ot.OptimizationProblem()
problem.setObjective(model)
bounds = ot.Interval([0.0] * dim, [1.0] * dim)
problem.setBounds(bounds)

# design
experiment = ot.Box([1, 1])
inputSample = experiment.generate()
modelEval = model(inputSample)
outputSample = modelEval.getMarginal(0)

# first kriging model
covarianceModel = ot.SquaredExponential([0.3007, 0.2483], [0.981959])
basis = ot.ConstantBasisFactory(dim).build()
kriging = ot.KrigingAlgorithm(inputSample, outputSample, covarianceModel,
                              basis)
Example #14

# bounds
linear = ot.NumericalMathFunction(
    ['x1', 'x2', 'x3', 'x4'], ['y1'], ['x1+2*x2-3*x3+4*x4'])

dim = 4
startingPoint = [0.] * dim

bounds = ot.Interval([-3.] * dim, [5.] * dim)

for algo in [ot.SLSQP(), ot.LBFGS(), ot.NelderMead()]:
    for minimization in [True, False]:
        for inequality in [True, False]:
            for equality in [True, False]:
                problem = ot.OptimizationProblem(linear, ot.NumericalMathFunction(), ot.NumericalMathFunction(), bounds)
                problem.setMinimization(minimization)
                if inequality:
                    # x3 <= x1
                    problem.setInequalityConstraint(ot.NumericalMathFunction(['x1', 'x2', 'x3', 'x4'], ['ineq'], ['x1-x3']))
                if equality:
                    # x4 = 2
                    problem.setEqualityConstraint(ot.NumericalMathFunction(['x1', 'x2', 'x3', 'x4'], ['eq'], ['x4-2']))
                try:
                    algo.setProblem(problem)
                    algo.setStartingPoint(startingPoint)
                    print('algo=', algo)
                    algo.run()
                    result = algo.getResult()
                    print('x^=', printNumericalPoint(result.getOptimalPoint(), 4))
                except Exception as err:
                    print('-- error:', err)
Example #15
    print("*** {} completed:".format(problemName))
    print("      -- Optimal point = ", result.getOptimalPoint())
    print("      -- Optimal value = ", result.getOptimalValue())
    print("      -- Iteration number = ", result.getIterationNumber())
    print("      -- Evaluation number = ", result.getEvaluationNumber())
    print("      -- Absolute error = {:.6e}".format(result.getAbsoluteError()))
    print("      -- Relative error = {:.6e}".format(result.getRelativeError()))
    print("      -- Residual error = {:.6e}".format(result.getResidualError()))
    print("      -- Constraint error = {:.6e}".format(
        result.getConstraintError()))


# Define the problems based on the Rosenbrock function
rosenbrock = ot.SymbolicFunction(['x1', 'x2'], ['(1-x1)^2+100*(x2-x1^2)^2'])

unboundedProblem = ot.OptimizationProblem(rosenbrock)

notConstrainingBounds = ot.Interval([-5.0, -5.0], [5.0, 5.0])
notConstrainingBoundsProblem = ot.OptimizationProblem(rosenbrock,
                                                      ot.Function(),
                                                      ot.Function(),
                                                      notConstrainingBounds)

constrainingBounds = ot.Interval([0.0, -2.0], [5.0, 0.5])
constrainingBoundsProblem = ot.OptimizationProblem(rosenbrock, ot.Function(),
                                                   ot.Function(),
                                                   constrainingBounds)

start = [3.0, -1.5]
unboundedPref = [1.0, 1.0]
boundedPref = [0.70856, 0.5]
Example #16
        else:
            oss += sep + format % point[i]
        sep = ","
    oss += "]"
    return oss


# linear
levelFunction = ot.SymbolicFunction(["x1", "x2", "x3", "x4"],
                                    ["x1+2*x2-3*x3+4*x4"])
startingPoint = ot.Point(4, 0.0)
bounds = ot.Interval(ot.Point(4, -3.0), ot.Point(4, 5.0))
algo = ot.TNC()
algo.setStartingPoint(startingPoint)

problem = ot.OptimizationProblem(levelFunction)
problem.setBounds(bounds)
problem.setMinimization(True)

algo.setProblem(problem)
print('algo=', algo)
algo.run()
result = algo.getResult()
print('result=', printPoint(result.getOptimalPoint(), 4))
print('multipliers=', printPoint(result.getLagrangeMultipliers(), 4))

problem.setMinimization(False)
algo.setProblem(problem)
print('algo=', algo)
algo.run()
result = algo.getResult()
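# A minimal completion sketch, mirroring the minimization case above
print('result=', printPoint(result.getOptimalPoint(), 4))
print('multipliers=', printPoint(result.getLagrangeMultipliers(), 4))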
Example #17
# Create the initial population
size = 100
dist = ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * 2)
pop0 = dist.getSample(size)
# constrained population
pop1 = []
while len(pop1) < size:
    x = dist.getRealization()
    if x[0] > x[1]:
        pop1.append(x)

multi_obj = ["nsga2", "moead", "mhaco", "nspso"]
for name in multi_obj:
    for use_ineq in [False, True]:
        zdt1 = ot.OptimizationProblem(f)
        zdt1.setBounds(bounds)
        if use_ineq:
            zdt1.setInequalityConstraint(ineq)
            algo = ot.Pagmo(zdt1, name, pop1)
        else:
            algo = ot.Pagmo(zdt1, name, pop0)
        algo.setBlockSize(8)
        # algo.setProgressCallback(progress)
        # algo.setStopCallback(stop)
        algo.run()
        result = algo.getResult()
        x = result.getFinalPoints()
        y = result.getFinalValues()
        fronts = result.getParetoFrontsIndices()
        assert len(fronts) > 0, "no pareto"
Example #18
objective = ot.SymbolicFunction(['x1', 'x2', 'x3', 'x4'],
                                ['x1 + 2 * x2 - 3 * x3 + 4 * x4'])

# %%
# define the constraints
inequality_constraint = ot.SymbolicFunction(['x1', 'x2', 'x3', 'x4'],
                                            ['x1-x3'])

# %%
# define the problem bounds
dim = objective.getInputDimension()
bounds = ot.Interval([-3.] * dim, [5.] * dim)

# %%
# define the problem
problem = ot.OptimizationProblem(objective)
problem.setMinimization(True)
problem.setInequalityConstraint(inequality_constraint)
problem.setBounds(bounds)

# %%
# solve the problem
algo = ot.Cobyla()
algo.setProblem(problem)
startingPoint = [0.0] * dim
algo.setStartingPoint(startingPoint)
algo.run()

# %%
# retrieve results
result = algo.getResult()
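# %%
# A short inspection sketch, assuming the `result` retrieved above
print('optimal point:', result.getOptimalPoint())
print('optimal value:', result.getOptimalValue())
print('evaluations:', result.getEvaluationNumber())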
Example #19
    def test_LevelFunction(self):
        dim = 4
        bounds = ot.Interval([-500] * dim, [500] * dim)
        # Objective function
        level_function = ot.NumericalMathFunction(['x1', 'x2', 'x3', 'x4'],
                                                  ['x1+2*x2-3*x3+4*x4'])
        level_value = 3
        # Define optimization problem
        problem = ot.OptimizationProblem()
        problem.setLevelFunction(level_function)
        problem.setLevelValue(level_value)
        problem.setBounds(bounds)
        # Prepare solver
        solver = GTOpt(problem)
        solver.setStartingPoint(bounds.getUpperBound())
        solver.enableConstraintsGradient()
        solver.enableObjectivesGradient()
        # Run optimization and get result
        solver.run()
        result = solver.getResult()
        # Asserts
        outputs_dim = level_function.getOutputDimension() + 1
        gradients_dim = outputs_dim * dim
        self.assertEqual(result.getOptimalValue().getDimension(),
                         outputs_dim - 1)
        self.assertEqual(dim + outputs_dim + gradients_dim,
                         solver.getP7History().getDimension())
        self.assertEqual(result.getIterationNumber(),
                         solver.getP7History().getSize())

        # Compare the results with those of p7core gtopt

        class Problem(gtopt.ProblemGeneric):
            def prepare_problem(self):
                p7_bounds = list(
                    zip(bounds.getLowerBound(), bounds.getUpperBound()))
                for i in range(dim):
                    self.add_variable(
                        bounds=p7_bounds[i],
                        initial_guess=solver.getStartingPoint()[i])
                self.add_objective()
                self.add_constraint(bounds=(0.0, 0.0))
                self.enable_objectives_gradient()
                self.enable_constraints_gradient()

            def evaluate(self, queryx, querymask):
                functions_batch = []
                output_masks_batch = []
                for x, mask in zip(queryx, querymask):
                    objectives = list(problem.getObjective()(x))
                    constraints = list(problem.getEqualityConstraint()(x))
                    # Objectives gradient
                    objectives_gradient = problem.getObjective().gradient(x)
                    objectives_gradient = sum(
                        np.matrix(objectives_gradient.transpose()).tolist(),
                        [])
                    # Constraints gradient
                    constraints_gradient = problem.getEqualityConstraint(
                    ).gradient(x)
                    constraints_gradient = sum(
                        np.matrix(constraints_gradient.transpose()).tolist(),
                        [])
                    functions_batch.append(objectives + constraints +
                                           objectives_gradient +
                                           constraints_gradient)
                    output_masks_batch.append(mask)
                return functions_batch, output_masks_batch

        self.compare_results(p7ot_result=solver.getP7Result(),
                             problem=Problem())
Example #20
rosenbrock = ot.MemoizeFunction(rosenbrock)

# %%
graph = rosenbrock.draw(lowerbound, upperbound, [100] * 2)
graph.setTitle("Rosenbrock function")
view = viewer.View(graph)

# %%
# We see that the minimum is on the top right of the picture and the starting point is on the top left of the picture. Since the function has a long valley following the curve :math:`x_2 - x_1^2 = 0`, the algorithm generally has to follow the bottom of the valley.

# %%
# Create and solve the optimization problem
# -----------------------------------------

# %%
problem = ot.OptimizationProblem(rosenbrock)

# %%
algo = ot.Cobyla(problem)
algo.setMaximumRelativeError(1.e-1)  # on x
algo.setMaximumEvaluationNumber(50000)
algo.setStartingPoint(x0)
algo.run()

# %%
result = algo.getResult()

# %%
xoptim = result.getOptimalPoint()
xoptim
Example #21
 def test_Exceptions(self):
     function = ot.NumericalMathFunction(['x1', 'x2'],
                                         ['(x1-0.6)^2 + (x2-0.6)^2'])
     # Bounds
     with self.assertRaises(ValueError):
         bounds = ot.Interval([-5] * 3, [5] * 3)
         problem = ot.OptimizationProblem()
         problem.setObjective(function)
         problem.setBounds(bounds)
         solver = GTOpt(problem)
         solver.setStartingPoint([0, 0])
         solver.run()
     # Initial guess
     with self.assertRaises(ValueError):
         bounds = ot.Interval([-5] * 2, [5] * 2)
         problem = ot.OptimizationProblem()
         problem.setObjective(function)
         problem.setBounds(bounds)
         solver = GTOpt(problem)
         solver.setStartingPoint([0, 0, 0])
         solver.run()
     # Constraints
     with self.assertRaises(ValueError):
         bounds = ot.Interval([-5] * 2, [5] * 2)
         problem = ot.OptimizationProblem()
         problem.setObjective(function)
         problem.setEqualityConstraint(
             ot.NumericalMathFunction(['x1', 'x2', 'x3'], ['x1-x2']))
         problem.setBounds(bounds)
         solver = GTOpt(problem)
         solver.run()
     # Hints
     bounds = ot.Interval([-5] * 2, [5] * 2)
     problem = ot.OptimizationProblem()
     problem.setObjective(function)
     problem.setBounds(bounds)
     with self.assertRaises(TypeError):
         GTOpt(problem, input_hints={})
     with self.assertRaises(ValueError):
         GTOpt(problem, input_hints=[{}])
     with self.assertRaises(Exception):
         solver = GTOpt(problem,
                        input_hints=[{
                            "@GTOpt/VariableType": "int"
                        }, {}])
         solver.run()
     with self.assertRaises(TypeError):
         solver = GTOpt(problem,
                        input_hints=[{
                            "@GTOpt/VariableType": "integer"
                        }, None])
         solver.run()
     with self.assertRaises(ValueError):
         solver = GTOpt(problem,
                        input_hints=[{
                            "@GTOpt/VariableType": "integer"
                        }, {}])
         solver.setInputHints({"@GTOpt/VariableType": "integer"}, [10])
         solver.run()
     solver = GTOpt(problem,
                    input_hints=[{
                        "@GTOpt/VariableType": "integer"
                    }, {}])
     solver.setInputHints({"@GTOpt/VariableType": "continuous"}, [0])
Example #22
# %%
graph = rastrigin.draw(lowerbound, upperbound, [100]*dim)
graph.setTitle("Rastrigin function")
view = viewer.View(graph, legend_kw={
                   'bbox_to_anchor': (1, 1), 'loc': "upper left"})
view.getFigure().tight_layout()

# %%
# We see that the Rastrigin function has several local minima. However, there is only one global minimum at :math:`\vect{x}^\star = (0, 0)`.

# %%
# Create the problem and set the optimization algorithm
# -----------------------------------------------------

# %%
problem = ot.OptimizationProblem(rastrigin)

# %%
# We use the :class:`~openturns.Cobyla` algorithm and run it from multiple starting points selected by a :class:`~openturns.LowDiscrepancyExperiment`.

# %%
size = 64
distribution = ot.ComposedDistribution(
    [ot.Uniform(lowerbound[0], upperbound[0])] * dim)
experiment = ot.LowDiscrepancyExperiment(
    ot.SobolSequence(), distribution, size)
solver = ot.MultiStart(ot.Cobyla(problem), experiment.generate())

# %%
# Visualize the starting points of the optimization algorithm
# -----------------------------------------------------------
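# %%
# A possible sketch, assuming the `graph` and `solver` defined above: overlay
# the Sobol' starting points on the Rastrigin contour plot.
startingPoints = solver.getStartingSample()
cloud = ot.Cloud(startingPoints)
cloud.setPointStyle("bullet")
graph.add(cloud)
view = viewer.View(graph)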
Example #23
print('x^=', printPoint(result.getOptimalPoint(), 4))
print('f(x^)=', printPoint(result.getOptimalValue(), 4))
print('lambda^=', printPoint(result.computeLagrangeMultipliers(), 4))

# bounds
linear = ot.SymbolicFunction(
    ['x1', 'x2', 'x3', 'x4'], ['x1+2*x2-3*x3+4*x4'])

dim = 4
startingPoint = [0.] * dim

bounds = ot.Interval([-3.] * dim, [5.] * dim)

for minimization in [True, False]:

    problem = ot.OptimizationProblem(
        linear, ot.Function(), ot.Function(), bounds)
    problem.setMinimization(minimization)
    algo = ot.Cobyla(problem)
    algo.setMaximumEvaluationNumber(150)
    algo.setStartingPoint(startingPoint)
    print('algo=', algo)
    algo.run()
    result = algo.getResult()
    print('x^=', printPoint(result.getOptimalPoint(), 4))
    print('f(x^)=', printPoint(result.getOptimalValue(), 4))
    print('lambda^=', printPoint(result.computeLagrangeMultipliers(), 4))

# empty problem
algo = ot.Cobyla()
try:
    algo.run()
except Exception as err:
    print('-- error:', err)
Example #24
        else:
            sep = ","
        if m.fabs(point[i]) < eps:
            oss += sep + format % m.fabs(point[i])
        else:
            oss += sep + format % point[i]
        sep = ","
    oss += "]"
    return oss


# linear
levelFunction = ot.NumericalMathFunction(["x1", "x2", "x3", "x4"], ["y1"],
                                         ["x1+2*x2-3*x3+4*x4"])
startingPoint = ot.NumericalPoint(4, 0.0)
algo = ot.Cobyla(ot.OptimizationProblem(levelFunction, 3.0))
algo.setStartingPoint(startingPoint)
print('algo=', algo)
algo.run()
result = algo.getResult()
print('x^=', printNumericalPoint(result.getOptimalPoint(), 4))

# non-linear
levelFunction = ot.NumericalMathFunction(["x1", "x2", "x3", "x4"], ["y1"],
                                         ["x1*cos(x1)+2*x2*x3-3*x3+4*x3*x4"])
startingPoint = ot.NumericalPoint(4, 0.0)
algo = ot.Cobyla(ot.OptimizationProblem(levelFunction, 3.0))
algo.setStartingPoint(startingPoint)
algo.setMaximumIterationNumber(400)
algo.setMaximumAbsoluteError(1.0e-10)
algo.setMaximumRelativeError(1.0e-10)
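# A minimal completion sketch, running the solver as in the linear case above
algo.run()
result = algo.getResult()
print('x^=', printNumericalPoint(result.getOptimalPoint(), 4))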
Example #25
lowerBound = ot.NumericalPoint((-1.0, 1.0e-4))
upperBound = ot.NumericalPoint((3.0, 2.0))
finiteLowerBound = ot.BoolCollection((0, 1))
finiteUpperBound = ot.BoolCollection((0, 0))
bounds = ot.Interval(lowerBound, upperBound, finiteLowerBound,
                     finiteUpperBound)

# Create the starting point of the search
# For mu: the first data point
# For sigma: a value estimated from the first two data points
startingPoint = ot.NumericalPoint(2)
startingPoint[0] = sample[0][0]
startingPoint[1] = m.sqrt(
    (sample[1][0] - sample[0][0]) * (sample[1][0] - sample[0][0]))

# Create the optimization problem
problem = ot.OptimizationProblem(myLogLikelihoodOT, ot.NumericalMathFunction(),
                                 ot.NumericalMathFunction(), bounds)
problem.setMinimization(False)

# Create the TNC algorithm
myAlgoTNC = ot.TNC(problem)
myAlgoTNC.setStartingPoint(startingPoint)

# Run the algorithm and extract results
myAlgoTNC.run()
resMLE = myAlgoTNC.getResult()
MLEparameters = resMLE.getOptimalPoint()
print("MLE of (mu, sigma) = (", MLEparameters[0], ", ", MLEparameters[1], ")")
# END_TEX