#! /usr/bin/env python
"""Exercise the Abdo-Rackwitz algorithm with substituted finite-difference gradients."""
from __future__ import print_function
import openturns as ot

# --- First case: a linear level function -------------------------------
level_function = ot.NumericalMathFunction(
    ["x1", "x2", "x3", "x4"], ["y1"], ["x1+2*x2-3*x3+4*x4"])
# Abdo-Rackwitz requires a gradient, so attach a non-centered
# finite-difference one built from the function's evaluation.
fd_gradient = ot.NonCenteredFiniteDifferenceGradient(
    1e-7, level_function.getEvaluation())
print("myGradient = ", repr(fd_gradient))
# Substitute the gradient
level_function.setGradient(
    ot.NonCenteredFiniteDifferenceGradient(fd_gradient))
origin = [0.0] * 4
solver = ot.AbdoRackwitz(ot.OptimizationProblem(level_function, 3.0))
solver.setStartingPoint(origin)
solver.run()
print("result = ", solver.getResult())

# --- Second case: a non-linear level function --------------------------
level_function = ot.NumericalMathFunction(
    ["x1", "x2", "x3", "x4"], ["y1"],
    ["x1*cos(x1)+2*x2*x3-3*x3+4*x3*x4"])
# Same finite-difference gradient substitution as above.
fd_gradient = ot.NonCenteredFiniteDifferenceGradient(
    1e-7, level_function.getEvaluation())
print("myGradient = ", repr(fd_gradient))
# Substitute the gradient
level_function.setGradient(
    ot.NonCenteredFiniteDifferenceGradient(fd_gradient))
origin = [0.0] * 4
solver = ot.AbdoRackwitz(ot.OptimizationProblem(level_function, -0.5))
f4.getCacheHits() # %% # Evaluate the gradient of the function at a particular point gradientMatrix = f.gradient(x) gradientMatrix # %% # Evaluate the hessian of the function at a particular point hessianMatrix = f.hessian(x) hessianMatrix # %% # Change the gradient method to a non centered finite difference method step = [1e-7] * f.getInputDimension() gradient = ot.NonCenteredFiniteDifferenceGradient(step, f.getEvaluation()) f.setGradient(gradient) gradient # %% # Change the hessian method to a centered finite difference method step = [1e-7] * f.getInputDimension() hessian = ot.CenteredFiniteDifferenceHessian(step, f.getEvaluation()) f.setHessian(hessian) hessian # %% # Get the number of times the function has been evaluated f.getEvaluationCallsNumber() # %%
# NOTE(review): fragment of a call-counting test; `ot_f`, `param_f`, `x`, `y`
# and the `n_calls` counter (incremented by a Python wrapper) are defined
# earlier in the file — confirm against the full test.
par_grad = param_f.parameterGradient(x)
n_calls_1 = ot_f.getCallsNumber()
# Analytic-gradient path: the parameter gradient is expected to cost
# exactly 4 evaluations of the underlying function.
assert n_calls_1 - n_calls_0 == 4, "Expected n_calls_1 - n_calls_0 == 4, here n_calls_1 - n_calls_0 == " + \
    str(n_calls_1 - n_calls_0)
assert n_calls == 4, "Expected n_calls == 4, here n_calls == " + str(n_calls)

n_calls = 0
n_calls_0 = ot_f.getCallsNumber()
# Gradient with respect to the remaining (non-parameter) variables:
# expected to cost exactly 2 evaluations.
f_grad = param_f.gradient(y)
n_calls_1 = ot_f.getCallsNumber()
assert n_calls_1 - n_calls_0 == 2, "Expected n_calls_1 - n_calls_0 == 2, here n_calls_1 - n_calls_0 == " + \
    str(n_calls_1 - n_calls_0)
assert n_calls == 2, "Expected n_calls == 2, here n_calls == " + str(n_calls)

# Switch to a non-centered finite-difference gradient and repeat the
# counting: the FD scheme changes the expected number of evaluations.
eps = 1e-7
gr_f = ot.NonCenteredFiniteDifferenceGradient(eps, ot_f.getEvaluation())
ot_f.setGradient(gr_f)
n_calls = 0
param_f = ot.ParametricFunction(ot_f, [0, 1], [1.0, 2.0])
x = [3.5]
y = [2.5]
n_calls_0 = ot_f.getCallsNumber()
par_grad = param_f.parameterGradient(x)
n_calls_1 = ot_f.getCallsNumber()
# Non-centered FD over 2 parameters: reference point + 2 perturbations = 3.
assert n_calls_1 - n_calls_0 == 3, "Expected n_calls_1 - n_calls_0 == 3, here n_calls_1 - n_calls_0 == " + \
    str(n_calls_1 - n_calls_0)
assert n_calls == 3, "Expected n_calls == 3, here n_calls == " + str(n_calls)

n_calls = 0
n_calls_0 = ot_f.getCallsNumber()
#! /usr/bin/env python
"""Compare a symbolic gradient with its non-centered finite-difference approximation."""
from __future__ import print_function
import openturns as ot

step_size = 1e-4

# Instance creation: a function from R^2 to R^3.
func = ot.SymbolicFunction(
    ['x1', 'x2'],
    ['x1*sin(x2)', 'cos(x1+x2)', '(x2+1)*exp(x1-2*x2)'])

# One perturbation step per input component, and the point where the
# gradients are compared.
epsilon = ot.Point(func.getInputDimension(), step_size)
point = ot.Point(epsilon.getDimension(), 1.0)

fd_grad = ot.NonCenteredFiniteDifferenceGradient(
    epsilon, func.getEvaluation())
print("myGradient=", repr(fd_grad))
print("myFunc.gradient(", repr(point), ")=",
      repr(func.gradient(point)))
print("myGradient.gradient(", repr(point), ")=",
      repr(fd_grad.gradient(point)))

# Substitute the gradient of the function with the finite-difference one
# and check the gradient reported after the substitution.
func.setGradient(ot.NonCenteredFiniteDifferenceGradient(fd_grad))
print("myFunc.gradient(", repr(point), ")=",
      repr(func.gradient(point)), " (after substitution)")