Example #1
def residualFunctionNoise(params):
    modelx = ot.ParametricFunction(model, [0, 1, 2], params)
    return [modelx(x[i])[0] - ynoise[i, 0] for i in range(m)]


# Definition of residual as ot.PythonFunction and optimization problem
residual = ot.PythonFunction(n, m, residualFunction)
residualNoise = ot.PythonFunction(n, m, residualFunctionNoise)

lsqProblem = ot.LeastSquaresProblem(residual)
lsqNoiseProblem = ot.LeastSquaresProblem(residualNoise)

startingPoint = [0.0, 0.0, 0.0]

### LSQ SOLVER
# Definition of Dlib solver, setting starting point
lsqAlgo = ot.Dlib(lsqProblem, "LSQ")
lsqAlgo.setStartingPoint(startingPoint)
lsqAlgo.run()

# Retrieving results
lsqResult = lsqAlgo.getResult()
printResults(lsqResult, "LSQ (without noise)")

# Same with noise
lsqNoiseAlgo = ot.Dlib(lsqNoiseProblem, "LSQ")
lsqNoiseAlgo.setStartingPoint(startingPoint)
lsqNoiseAlgo.run()
lsqNoiseResult = lsqNoiseAlgo.getResult()
printResults(lsqNoiseResult, "LSQ (with noise)")

### LSQLM SOLVER
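# A minimal sketch of the Levenberg-Marquardt variant, mirroring the LSQ block
# above; the "LSQLM" algorithm name is assumed here from the section header and
# from the naming convention used for "LSQ" in this snippet.
lsqlmAlgo = ot.Dlib(lsqProblem, "LSQLM")
lsqlmAlgo.setStartingPoint(startingPoint)
lsqlmAlgo.run()
printResults(lsqlmAlgo.getResult(), "LSQLM (without noise)")
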
Example #2
notConstrainingBoundsProblem = ot.OptimizationProblem(rastrigin, ot.Function(),
                                                      ot.Function(),
                                                      notConstrainingBounds)

constrainingBounds = ot.Interval([-1.0, -2.0], [5.0, -0.5])
constrainingBoundsProblem = ot.OptimizationProblem(rastrigin, ot.Function(),
                                                   ot.Function(),
                                                   constrainingBounds)

boundedPref = [0.0, -1.0]
unboundedPref = [0.0, 0.0]

## GLOBAL ALGORITHM ##

# Non-constraining bounds Global
notConstrainingBoundsGlobal = ot.Dlib(notConstrainingBoundsProblem, "Global")
notConstrainingBoundsGlobal.setStartingPoint([0.0] * 2)
notConstrainingBoundsGlobal.setMaximumEvaluationNumber(300)
notConstrainingBoundsGlobal.run()
printResults(notConstrainingBoundsGlobal.getResult(),
             "Non-constraining bounds Global")

# Constraining bounds Global
constrainingBoundsGlobal = ot.Dlib(constrainingBoundsProblem, "Global")
constrainingBoundsGlobal.setMaximumEvaluationNumber(300)
constrainingBoundsGlobal.setStartingPoint([0.0] * 2)
constrainingBoundsGlobal.run()
printResults(constrainingBoundsGlobal.getResult(),
             "Constraining bounds Global")

ott.assert_almost_equal(

Example #3
# More details on dlib algorithms are available `here <http://dlib.net/optimization.html>`_ .

# %%
# Solving an unconstrained problem with conjugate gradient algorithm
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The following example demonstrates the use of the dlib conjugate gradient algorithm to find the minimum of the `Rosenbrock function <https://en.wikipedia.org/wiki/Rosenbrock_function>`_. The optimal point can be computed analytically, and its value is [1.0, 1.0].

# %%
# Define the problem based on the Rosenbrock function
rosenbrock = ot.SymbolicFunction(['x1', 'x2'], ['(1-x1)^2+(x2-x1^2)^2'])
problem = ot.OptimizationProblem(rosenbrock)

# %%
# The optimization algorithm is instantiated from the problem to solve and the name of the algorithm
algo = ot.Dlib(problem, 'cg')
print("Dlib algorithm, type ", algo.getAlgorithmName())
print("Maximum iteration number: ", algo.getMaximumIterationNumber())
print("Maximum evaluation number: ", algo.getMaximumEvaluationNumber())
print("Maximum absolute error: ", algo.getMaximumAbsoluteError())
print("Maximum relative error: ", algo.getMaximumRelativeError())
print("Maximum residual error: ", algo.getMaximumResidualError())
print("Maximum constraint error: ", algo.getMaximumConstraintError())

# %%
# When using conjugate gradient, BFGS/LBFGS, Newton, least squares or trust region methods, optimization proceeds until one of the following criteria is met:
#
# - the errors (absolute, relative, residual, constraint) are all below the limits set by the user;
# - the process reaches the maximum number of iterations or function evaluations.

# %%
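# A minimal sketch of tuning these stopping criteria before running the solver;
# the setter names mirror the getters printed above, and the values are purely
# illustrative.
algo.setMaximumIterationNumber(1000)
algo.setMaximumEvaluationNumber(10000)
algo.setMaximumAbsoluteError(1.0e-8)
algo.setMaximumRelativeError(1.0e-8)
algo.setMaximumResidualError(1.0e-8)
algo.setMaximumConstraintError(1.0e-8)
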
Example #4
    def run(self):

        bounds = self.bounds
        if self.operator(self.S, self.S + 1):
            quantile_courant = ot.Point([self.S + 1])
        else:
            quantile_courant = ot.Point([self.S - 1])

        theta_0 = self.initial_theta
        num_iter = 0
        #main loop of adaptive importance sampling
        while self.operator(self.S, quantile_courant[0]):

            theta_courant = theta_0
            self.update_dist(theta_0)
            Sample = self.aux_distrib.getSample(
                self.n_IS)  # drawing of samples using auxiliary density
            Resp_sample = self.limit_state_function(
                Sample)  # evaluation of the limit state function
            quantile_courant = Resp_sample.computeQuantile(
                self.rho_quantile)  #computation of current quantile

            f_opt = lambda theta: self.obj_func(
                Sample, Resp_sample, theta, quantile_courant
            )  #definition of objective function for CE

            objective = ot.PythonFunction(self.dim_theta, 1, f_opt)

            problem = ot.OptimizationProblem(
                objective
            )  # definition of the CE optimization problem for the auxiliary distribution parameters
            problem.setBounds(bounds)
            problem.setMinimization(False)

            algo_optim = ot.Dlib(problem, 'Global')
            algo_optim.setMaximumIterationNumber(50000)
            algo_optim.setStartingPoint(theta_0)
            algo_optim.run()  #Run of CE optimization

            # retrieve results
            result = algo_optim.getResult()
            theta_0 = result.getOptimalPoint()
            if self.verbose:
                if num_iter == 0:
                    print('theta', '| current threshold')
                    print(theta_0, '|', quantile_courant[0])
                else:
                    print(theta_0, '|', quantile_courant[0])

            num_iter += 1

        #Estimate probability
        self.update_dist(theta_courant)  #update of auxiliary density
        y = np.array([
            self.operator(Resp_sample[i][0], self.S)
            for i in range(Resp_sample.getSize())
        ])  #find failure points
        indices_critic = np.where(y)[0].tolist()  # find failure sample indices

        Resp_sample_critic = Resp_sample.select(indices_critic)
        Sample_critic = Sample.select(indices_critic)

        pdf_init_critic = self.distrib.computePDF(
            Sample_critic)  #evaluate initial PDF on failure samples
        pdf_aux_critic = self.aux_distrib.computePDF(
            Sample_critic)  #evaluate auxiliary PDF on failure samples
        proba = 1 / self.n_IS * np.sum(
            np.array([pdf_init_critic]) /
            np.array([pdf_aux_critic]))  #Calculation of failure probability

        self.proba = proba
        self.samples = Sample

        # Save data in the SimulationResult structure
        self.result.setProbabilityEstimate(proba)
        self.result.setCESamples(Sample)
        self.result.setAuxiliaryDensity(self.aux_distrib)
        return None
Example #5
def residualFunctionNoise(params):
    modelx = ot.ParametricFunction(model, [0, 1, 2], params)
    return [modelx(x[i])[0] - ynoise[i, 0] for i in range(m)]


# Definition of residual as ot.PythonFunction and optimization problem
residual = ot.PythonFunction(n, m, residualFunction)
residualNoise = ot.PythonFunction(n, m, residualFunctionNoise)

lsqProblem = ot.LeastSquaresProblem(residual)
lsqNoiseProblem = ot.LeastSquaresProblem(residualNoise)

startingPoint = [0.0, 0.0, 0.0]

# LSQ SOLVER
# Definition of Dlib solver, setting starting point
lsqAlgo = ot.Dlib(lsqProblem, "least_squares")
lsqAlgo.setStartingPoint(startingPoint)
lsqAlgo.run()

# Retrieving results
lsqResult = lsqAlgo.getResult()
printResults(lsqResult, "LSQ (without noise)")

# Same with noise
lsqNoiseAlgo = ot.Dlib(lsqNoiseProblem, "least_squares")
lsqNoiseAlgo.setStartingPoint(startingPoint)
lsqNoiseAlgo.run()
lsqNoiseResult = lsqNoiseAlgo.getResult()
printResults(lsqNoiseResult, "LSQ (with noise)")
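
# A quick sketch (illustrative, using only calls shown elsewhere in these
# examples) that reads the fitted parameters back from the results:
print("Fitted parameters (no noise):  ", lsqResult.getOptimalPoint())
print("Fitted parameters (with noise):", lsqNoiseResult.getOptimalPoint())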

Example #6
notConstrainingBounds = ot.Interval([-5.0, -5.0], [5.0, 5.0])
notConstrainingBoundsProblem = ot.OptimizationProblem(rosenbrock, ot.Function(),
                                                      ot.Function(),
                                                      notConstrainingBounds)

constrainingBounds = ot.Interval([0.0, -2.0], [5.0, 0.5])
constrainingBoundsProblem = ot.OptimizationProblem(rosenbrock, ot.Function(),
                                                   ot.Function(),
                                                   constrainingBounds)

start = [3.0, -1.5]
unboundedPref = [1.0, 1.0]
boundedPref = [0.70856, 0.5]

## CONJUGATE GRADIENT ALGORITHM ##

# Unbounded CG
unboundedCg = ot.Dlib(unboundedProblem, 'CG')
unboundedCg.setStartingPoint(start)
unboundedCg.setMaximumIterationNumber(10000)
unboundedCg.setMaximumEvaluationNumber(100000)
unboundedCg.run()
printResults(unboundedCg.getResult(), 'Unbounded CG')

# Non-constraining bounds CG
notConstrainingBoundsCg = ot.Dlib(notConstrainingBoundsProblem, "CG")
notConstrainingBoundsCg.setStartingPoint(start)
notConstrainingBoundsCg.setMaximumIterationNumber(10000)
notConstrainingBoundsCg.setMaximumEvaluationNumber(100000)
notConstrainingBoundsCg.run()
printResults(notConstrainingBoundsCg.getResult(), "Non-constraining bounds CG")

# Constraining bounds CG
Example #7
# More details on dlib algorithms are available `here <http://dlib.net/optimization.html>`_ .

# %%
# Solving an unconstrained problem with conjugate gradient algorithm
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The following example demonstrates the use of the dlib conjugate gradient algorithm to find the minimum of the `Rosenbrock function <https://en.wikipedia.org/wiki/Rosenbrock_function>`_. The optimal point can be computed analytically, and its value is [1.0, 1.0].

# %%
# Define the problem based on the Rosenbrock function
rosenbrock = ot.SymbolicFunction(['x1', 'x2'], ['(1-x1)^2+(x2-x1^2)^2'])
problem = ot.OptimizationProblem(rosenbrock)

# %%
# The optimization algorithm is instantiated from the problem to solve and the name of the algorithm
algo = ot.Dlib(problem, 'CG')
print("Dlib algorithm, type ", algo.getAlgorithmName())
print("Maximum iteration number: ", algo.getMaximumIterationNumber())
print("Maximum evaluation number: ", algo.getMaximumEvaluationNumber())
print("Maximum absolute error: ", algo.getMaximumAbsoluteError())
print("Maximum relative error: ", algo.getMaximumRelativeError())
print("Maximum residual error: ", algo.getMaximumResidualError())
print("Maximum constraint error: ", algo.getMaximumConstraintError())

# %%
# When using conjugate gradient, BFGS/LBFGS, Newton, least squares or trust region methods, optimization proceeds until one of the following criteria is met:
#
# - the errors (absolute, relative, residual, constraint) are all below the limits set by the user;
# - the process reaches the maximum number of iterations or function evaluations.

# %%
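# A minimal sketch of setting a starting point, running the algorithm, and
# retrieving the result; the starting point is illustrative and the calls
# mirror those used in the other snippets above.
algo.setStartingPoint([0.0, 0.0])
algo.run()
result = algo.getResult()
print("Optimal point:", result.getOptimalPoint())
print("Optimal value:", result.getOptimalValue())
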
Example #8
notConstrainingBounds = ot.Interval([-5.0, -5.0], [5.0, 5.0])
notConstrainingBoundsProblem = ot.OptimizationProblem(
    rosenbrock, ot.Function(), ot.Function(), notConstrainingBounds)

constrainingBounds = ot.Interval([0.0, -2.0], [5.0, 0.5])
constrainingBoundsProblem = ot.OptimizationProblem(
    rosenbrock, ot.Function(), ot.Function(), constrainingBounds)

start = [3.0, -1.5]
unboundedPref = [1.0, 1.0]
boundedPref = [0.70856, 0.5]

## CONJUGATE GRADIENT ALGORITHM ##

# Unbounded CG
unboundedCg = ot.Dlib(unboundedProblem, 'cg')
unboundedCg.setStartingPoint(start)
unboundedCg.setMaximumIterationNumber(10000)
unboundedCg.setMaximumEvaluationNumber(100000)
unboundedCg.run()
printResults(unboundedCg.getResult(), 'Unbounded CG')

# Non-constraining bounds CG
notConstrainingBoundsCg = ot.Dlib(notConstrainingBoundsProblem, "cg")
notConstrainingBoundsCg.setStartingPoint(start)
notConstrainingBoundsCg.setMaximumIterationNumber(10000)
notConstrainingBoundsCg.setMaximumEvaluationNumber(100000)
notConstrainingBoundsCg.run()
printResults(notConstrainingBoundsCg.getResult(), "Non-constraining bounds CG")

# Constraining bounds CG