Example #1
    def run(self):
        """
        Compute the Sobol' indices with the chosen algorithm.
        """

        # create the NumericalMathFunction which computes the POD for a given
        # realization and for all defect sizes.
        if self._podType == "kriging":
            self._PODaggr = ot.NumericalMathFunction(
                PODaggrKriging(self._POD, self._dim, self._defectSizes,
                               self._detectionBoxCox))
        elif self._podType == "chaos":
            self._PODaggr = ot.NumericalMathFunction(
                PODaggrChaos(self._POD, self._dim, self._defectSizes,
                             self._detectionBoxCox, self._simulationSize))

        if self._method == "Saltelli":
            self._sa = ot.SaltelliSensitivityAlgorithm(self._distribution,
                                                       self._N, self._PODaggr,
                                                       False)
        elif self._method == "Martinez":
            self._sa = ot.MartinezSensitivityAlgorithm(self._distribution,
                                                       self._N, self._PODaggr,
                                                       False)
        elif self._method == "Jansen":
            self._sa = ot.JansenSensitivityAlgorithm(self._distribution,
                                                     self._N, self._PODaggr,
                                                     False)
        elif self._method == "MauntzKucherenko":
            self._sa = ot.MauntzKucherenkoSensitivityAlgorithm(
                self._distribution, self._N, self._PODaggr, False)
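
The branches above differ only in which estimator class is instantiated; the constructor signature (distribution, sample size, model, second-order flag) is the same for all four. Below is a minimal, self-contained sketch of that call pattern on a toy analytical function; the formula, distribution and sample size are illustrative and are not taken from the class above.

import openturns as ot

# toy model and independent input distribution (illustrative only)
model = ot.NumericalMathFunction(
    ['x1', 'x2', 'x3'],
    ['sin(x1) + 7*sin(x2)^2 + 0.1*x3^4*sin(x1)'])
distribution = ot.ComposedDistribution([ot.Uniform(-3.14, 3.14)] * 3)

N = 1000  # size of each Sobol' sample block
# the last argument disables the second-order indices, as in run() above
sa = ot.SaltelliSensitivityAlgorithm(distribution, N, model, False)
print(sa.getFirstOrderIndices())
print(sa.getTotalOrderIndices())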
Example #2
    def __init__(self, chaosPOD, dim, defectSizes, detection, simulationSize):
        super(PODaggrChaos, self).__init__(dim, defectSizes.shape[0])
        self.chaosPOD = chaosPOD
        self.dim = dim
        self.defectSizes = defectSizes
        self.defectNumber = len(defectSizes)
        self.simulationSize = simulationSize
        self.detection = detection

        # get the sample of coefficients from the coefficient distribution;
        # it is used to compute the POD at a given point
        sampleCoefs = chaosPOD.getCoefficientDistribution().getSample(
            simulationSize)

        # get some results from the polynomial chaos in order to build a
        # vector-valued chaos function that returns the signal values of all
        # the chaos metamodels (one per coefficient realization) at one point
        chaosResult = chaosPOD.getPolynomialChaosResult()
        reducedBasis = chaosResult.getReducedBasis()
        transformation = chaosResult.getTransformation()
        chaosFunctionCol = []
        for coefs in sampleCoefs:
            standardChaosFunction = ot.NumericalMathFunction(
                reducedBasis, coefs)
            chaosFunctionCol.append(
                ot.NumericalMathFunction(standardChaosFunction,
                                         transformation))
        self.chaosFunction = ot.NumericalMathFunction(chaosFunctionCol)
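
The last line relies on the constructor that aggregates a collection of functions sharing the same input into a single vector-valued function, one output per collection member. A short illustrative sketch of that aggregation pattern with plain analytical functions:

import openturns as ot

f1 = ot.NumericalMathFunction(['x1', 'x2'], ['x1 + x2'])
f2 = ot.NumericalMathFunction(['x1', 'x2'], ['x1 * x2'])
# a single function with 2 inputs and 2 outputs
aggregated = ot.NumericalMathFunction([f1, f2])
print(aggregated([2.0, 3.0]))  # [5, 6]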
Example #3
    def optimizeLambda(self, marginal, deltaRHS):
        """
        Compute the lambda values

        Parameters
        ----------
        marginal : int
            The index of the perturbed marginal.
        deltaRHS : sequence of float of dimension 2
            The target values of the mean and of the second raw moment
            (variance + mean^2).
        """

        # define the optimization function, namely the Lagrangian function,
        # with its gradient and Hessian
        optimFunc = ot.PythonFunction(
            2, 1, lambda lamb: [self.H(marginal, lamb, deltaRHS)],
            gradient=lambda lamb: self.gradH(marginal, lamb, deltaRHS),
            hessian=lambda lamb: self.hessianH(marginal, lamb))

        # define the optimization problem
        optimPb = ot.OptimizationProblem(optimFunc,
                                         ot.NumericalMathFunction(),
                                         ot.NumericalMathFunction(),
                                         ot.Interval())

        # solve the problem using SLSQP from NLopt
        optim = ot.NLopt(optimPb, 'LD_SLSQP')
        optim.setStartingPoint([0, 0])
        optim.run()
        # return the lambda values, solution of the problem
        return optim.getResult().getOptimalPoint()
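
The same machinery, an ot.PythonFunction with gradient (and optionally Hessian) callbacks wrapped in an unconstrained OptimizationProblem and solved with NLopt's LD_SLSQP, can be exercised on a toy quadratic. Below is a minimal sketch under those assumptions; the objective is illustrative and is not the Lagrangian H used above.

import openturns as ot

# toy objective (x-1)^2 + (y-2)^2 with its analytical gradient
# (the gradient callback returns an inputDim x outputDim matrix)
func = ot.PythonFunction(
    2, 1,
    lambda x: [(x[0] - 1.0) ** 2 + (x[1] - 2.0) ** 2],
    gradient=lambda x: [[2.0 * (x[0] - 1.0)], [2.0 * (x[1] - 2.0)]])
problem = ot.OptimizationProblem(func, ot.NumericalMathFunction(),
                                 ot.NumericalMathFunction(), ot.Interval())
solver = ot.NLopt(problem, 'LD_SLSQP')
solver.setStartingPoint([0.0, 0.0])
solver.run()
print(solver.getResult().getOptimalPoint())  # close to [1, 2]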
Example #4
 def _buildChaosFunction(self, reducedBasis, transformation, coefs):
     """
     Build the chaos metamodel with given coefficients.
     """
     standardChaosFunction = ot.NumericalMathFunction(reducedBasis, coefs)
     chaosFunction = ot.NumericalMathFunction(standardChaosFunction,
                                              transformation)
     return chaosFunction
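
The two-argument form used above is the composition constructor: NumericalMathFunction(f, g) evaluates f(g(x)), here the chaos polynomial composed with the iso-probabilistic transformation. A small illustrative sketch with analytical functions:

import openturns as ot

g = ot.NumericalMathFunction(['x'], ['x + 1'])  # inner function
f = ot.NumericalMathFunction(['y'], ['y^2'])    # outer function
composed = ot.NumericalMathFunction(f, g)       # computes (x + 1)^2
print(composed([2.0]))  # [9]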
Example #5
    def test_Multiobjective(self):
        dim = 4
        bounds = ot.Interval([-500] * dim, [500] * dim)
        inputs = ['x' + str(i) for i in range(dim)]
        # Rosenbrock function
        formulas = ['']
        for i in range(dim - 1):
            formulas[0] += '(1-x%d)^2+100*(x%d-x%d^2)^2%s' % (
                i, i + 1, i, '' if i == dim - 2 else '+')
        rosenbrock = ot.NumericalMathFunction(inputs, formulas)
        # Sphere function
        formulas = ['']
        for i in range(dim):
            formulas[0] += 'x%d^2%s' % (i, '' if i == dim - 1 else '+')
        sphere = ot.NumericalMathFunction(inputs, formulas)
        # Objective function
        objective = ot.NumericalMathFunction([rosenbrock, sphere])
        # Define optimization problem
        problem = ot.OptimizationProblem()
        problem.setObjective(objective)
        problem.setBounds(bounds)
        # Prepare solver
        solver = GTOpt(problem)
        solver.setStartingPoint(bounds.getUpperBound())
        # Run optimization and get result
        solver.run()
        result = solver.getResult()
        # Asserts
        outputs_dim = objective.getOutputDimension()
        self.assertEqual(result.getOptimalValue().getDimension(), outputs_dim)
        self.assertEqual(dim + outputs_dim,
                         solver.getP7History().getDimension())
        self.assertEqual(result.getIterationNumber(),
                         solver.getP7History().getSize())

        # Compare the results with those of p7core gtopt

        class Problem(gtopt.ProblemGeneric):
            def prepare_problem(self):
                p7_bounds = list(zip(bounds.getLowerBound(),
                                     bounds.getUpperBound()))
                for j in range(dim):
                    self.add_variable(
                        bounds=p7_bounds[j],
                        initial_guess=solver.getStartingPoint()[j])
                self.add_objective()
                self.add_objective()

            def evaluate(self, queryx, querymask):
                functions_batch = []
                output_masks_batch = []
                for x, mask in zip(queryx, querymask):
                    functions_batch.append(list(objective(x)))
                    output_masks_batch.append(mask)
                return functions_batch, output_masks_batch

        self.compare_results(p7ot_result=solver.getP7Result(),
                             problem=Problem())
Example #6
    def _exec(self, X):
        inputTG = X.getTimeGrid()
        inputValues = X.getValues()
        f = ot.NumericalMathFunction(ot.PiecewiseLinearEvaluationImplementation(
            [x[0] for x in inputTG.getVertices()], inputValues))
        outputValues = ot.NumericalSample(0, 1)
        for t in self.outputGrid_.getVertices():
            kernel = ot.Normal(t[0], 0.05)

            def pdf(X):
                return [kernel.computePDF(X)]
            weight = ot.NumericalMathFunction(ot.PythonFunction(1, 1, pdf))
            outputValues.add(self.algo_.integrate(
                weight * f, kernel.getRange()))
        return ot.Field(self.outputGrid_, outputValues)
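
Here self.algo_ is an integration algorithm and self.outputGrid_ an output mesh, both supplied elsewhere in the class. The core pattern, integrating the product of a weight function and the interpolated signal over the kernel's range, can be reproduced with, for instance, Gauss-Kronrod quadrature; a sketch under that assumption:

import openturns as ot

f = ot.NumericalMathFunction(['x'], ['sin(x)'])
kernel = ot.Normal(0.0, 0.05)
weight = ot.NumericalMathFunction(
    ot.PythonFunction(1, 1, lambda x: [kernel.computePDF(x)]))

# GaussKronrod stands in for self.algo_ (an assumption; any 1-D
# integration algorithm with an integrate(function, interval) method works)
algo = ot.GaussKronrod()
value = algo.integrate(weight * f, kernel.getRange())
print(value)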
Example #7
 def test_gtapprox(self):
     input_dim = 2
     count = 10
     inputs = np.random.random((count, input_dim))
     outputs = [[x1 * x1 + x2 * x2] for (x1, x2) in inputs]
     # p7core model
     p7_model = gtapprox.Builder().build(inputs, outputs)
     # p7ot model function
     p7ot_function = ModelFunction(p7_model)
     # openturns function
     ot_function = ot.NumericalMathFunction(p7ot_function)
     # openturns optimization problem
     ot_optimization_problem = ot.OptimizationProblem()
     ot_optimization_problem.setObjective(p7ot_function)
     ot_optimization_problem.setBounds(ot.Interval([-100] * 2, [100] * 2))
     p7ot_solver = GTOpt(ot_optimization_problem)
     p7ot_solver.run()
     # openturns graphic
     input_dim = 1
     count = 10
     inputs = np.random.uniform(0, 10, [count, input_dim])
     outputs = [math.sin(x) for x in inputs]
     p7ot_function = ModelFunction(gtapprox.Builder().build(
         inputs, outputs))
     graph = p7ot_function.draw(0, 10, 100)
Example #8
    def __new__(
        self,
        n_input,
        n_output,
        wrapper_file,
        hosts=[],
        scheduler=None,
        analytical=False,
        n_cores=0,
        files_to_send=[],
        cleanup='ok',
        tmpdir=None,
        remote_tmpdir=None,
        user_data=None,
    ):

        instance = OpenTURNSDistributedPythonFunction(
            n_input,
            n_output,
            wrapper_file,
            hosts,
            scheduler,
            analytical,
            n_cores,
            files_to_send,
            cleanup,
            tmpdir,
            remote_tmpdir,
            user_data,
        )
        return ot.NumericalMathFunction(instance)
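
The __new__ above builds an OpenTURNSPythonFunction-compatible object and hands it to the NumericalMathFunction constructor. The same wrap-and-return idiom works for any subclass of ot.OpenTURNSPythonFunction; a minimal illustrative sketch:

import openturns as ot


class MyWrapper(ot.OpenTURNSPythonFunction):
    def __init__(self):
        # 2 inputs, 1 output
        super(MyWrapper, self).__init__(2, 1)

    def _exec(self, x):
        return [x[0] ** 2 + x[1] ** 2]


f = ot.NumericalMathFunction(MyWrapper())
print(f([1.0, 2.0]))  # [5]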
Example #9
 def __init__(self, a_i, Y_i):
     super(ReducedLogLikelihood, self).__init__(1, 1)
     self.setInputDescription(['lambda'])
     self.setOutputDescription(['LogLikelihood'])
     self.a_i_ = a_i
     self.Y_i_ = Y_i
     self.N_ = a_i.getSize()
     self.sumLogY_i = ot.NumericalMathFunction("y", "log(y)")(Y_i).computeMean()[0] * self.N_
Example #10
    def test_AdaptiveBlackbox(self):
        count = 20
        bounds = ot.Interval([-10] * 3, [10] * 3)
        function = ot.NumericalMathFunction(['x1', 'x2', 'x3'],
                                            ['x1 + 2*x2 + 3*x3'])
        generator = AdaptiveBlackbox(bounds=bounds,
                                     count=count,
                                     blackbox=function)
        generator.setDeterministic(True)
        self.assertEqual(count, generator._settings.get('budget'))
        self.assertEqual(generator.getCount(),
                         generator._settings.get('budget'))
        result = generator.generate()
        self.assertEqual(result.getDimension(), bounds.getDimension())
        self.assertEqual(result.getSize(), count)
        technique = generator.getP7Result().info["Generator"]["Technique"]
        self.assertEqual(technique, "Adaptive")
        # Compare the results with those of p7core generator
        p7_options = {
            'GTDoE/Technique': "Adaptive",
            'GTDoE/Deterministic': True
        }
        p7_bounds = self.convert_bounds(bounds)

        class P7_Blackbox(blackbox.Blackbox):
            def prepare_blackbox(self):
                # Add variables
                for i in range(function.getInputDimension()):
                    self.add_variable(bounds=(p7_bounds[0][i],
                                              p7_bounds[1][i]),
                                      name=function.getInputDescription()[i])
                # Add responses
                self.add_response(name=function.getOutputDescription()[0])

            def evaluate(self, points):
                response = []
                for point in points:
                    response.append(list(function(point)))
                return response

        p7_blackbox = P7_Blackbox()
        p7_result = gtdoe.Generator().generate(blackbox=p7_blackbox,
                                               bounds=p7_bounds,
                                               budget=count,
                                               options=p7_options)
        self.compare_results(result, p7_result)
        # Exceptions
        with self.assertRaises(TypeError):
            # Invalid blackbox
            AdaptiveBlackbox(bounds=bounds, count=count,
                             blackbox=None).generate()
        with self.assertRaises(ValueError):
            # Inconsistent blackbox and bounds dimension
            AdaptiveBlackbox(bounds=ot.Interval([1, 4], [5, 10]),
                             count=count,
                             blackbox=function).generate()
Example #11
    def test_Maximization(self):
        dim = 2
        bounds = ot.Interval([-500] * dim, [500] * dim)
        # Objective function
        objective = ot.NumericalMathFunction(
            ['x1', 'x2'], ['- (x1+2*x2-7)^2 - (2*x1+x2-5)^2 + 1'])
        # Define optimization problem
        problem = ot.OptimizationProblem()
        problem.setObjective(objective)
        problem.setBounds(bounds)
        # Maximization problem
        problem.setMinimization(False)
        # Prepare solver
        solver = GTOpt(problem)
        solver.setStartingPoint(bounds.getUpperBound())
        # Run optimization and get result
        solver.run()
        result = solver.getResult()
        # Asserts
        outputs_dim = objective.getOutputDimension()
        self.assertEqual(result.getOptimalValue().getDimension(), outputs_dim)
        self.assertEqual(dim + outputs_dim,
                         solver.getP7History().getDimension())
        self.assertEqual(result.getIterationNumber(),
                         solver.getP7History().getSize())

        # Compare the results with those of p7core gtopt

        class Problem(gtopt.ProblemGeneric):
            def prepare_problem(self):
                p7_bounds = list(zip(bounds.getLowerBound(),
                                     bounds.getUpperBound()))
                self.add_variable(bounds=p7_bounds[0],
                                  initial_guess=solver.getStartingPoint()[0])
                self.add_variable(bounds=p7_bounds[1],
                                  initial_guess=solver.getStartingPoint()[1])
                self.add_objective()

            def evaluate(self, queryx, querymask):
                functions_batch = []
                output_masks_batch = []
                for x, mask in zip(queryx, querymask):
                    functions_batch.append(
                        [-1 * value for value in objective(x)])
                    output_masks_batch.append(mask)
                return functions_batch, output_masks_batch

        self.compare_results(p7ot_result=solver.getP7Result(),
                             problem=Problem(),
                             maximization=True)
Example #12
    def _buildKrigingAlgo(self, inputSample, outputSample):
        """
        Build the functional chaos algorithm without running it.
        """
        if self._basis is None:
            # create a basis that is linear in the defect parameter
            # (1st parameter) and constant in the others
            input = ['x' + str(i) for i in range(self._dim)]
            functions = []
            # constant
            functions.append(ot.NumericalMathFunction(input, ['y'], ['1']))
            # linear for the first parameter only
            functions.append(ot.NumericalMathFunction(input, ['y'],
                                                      [input[0]]))
            self._basis = ot.Basis(functions)

        if self._covarianceModel is None:
            # anisotropic squared exponential covariance model
            covColl = ot.CovarianceModelCollection(self._dim)
            for i in range(self._dim):
                if LooseVersion(ot.__version__) == '1.6':
                    covColl[i] = ot.SquaredExponential(1, 1.)
                elif LooseVersion(ot.__version__) > '1.6':
                    covColl[i] = ot.SquaredExponential([1], [1.])
            self._covarianceModel = ot.ProductCovarianceModel(covColl)

        if LooseVersion(ot.__version__) == "1.9":
            algoKriging = ot.KrigingAlgorithm(inputSample, outputSample,
                                              self._covarianceModel,
                                              self._basis)
        else:
            algoKriging = ot.KrigingAlgorithm(inputSample, outputSample,
                                              self._basis,
                                              self._covarianceModel, True)
        algoKriging.run()
        return algoKriging
Example #13
    def _runMonteCarlo(self, defect):
        # set a parametric function where the first parameter = given defect
        g = ot.NumericalMathFunction(self._metamodel, [0], [defect])
        g.enableHistory()
        g.clearHistory()
        g.clearCache()
        output = ot.RandomVector(g, ot.RandomVector(self._distribution))
        event = ot.Event(output, ot.Greater(), self._detectionBoxCox)

        ##### Monte Carlo ########
        algo_MC = ot.MonteCarlo(event)
        algo_MC.setMaximumOuterSampling(self._samplingSize)
        # set a negative coefficient of variation so that the only stopping
        # criterion is the sampling size
        algo_MC.setMaximumCoefficientOfVariation(-1)
        algo_MC.run()
        return algo_MC.getResult()
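
The same chain (freeze one input of the metamodel, build an event on the output, estimate its probability by Monte Carlo) can be written on a toy model. Below is a hedged sketch using the same constructors; the model, threshold and sample size are illustrative.

import openturns as ot

# toy metamodel with two inputs; freeze the first input at 0.5
metamodel = ot.NumericalMathFunction(['a', 'x'], ['a + x'])
g = ot.NumericalMathFunction(metamodel, [0], [0.5])

distribution = ot.Normal()  # distribution of the remaining input
output = ot.RandomVector(g, ot.RandomVector(distribution))
event = ot.Event(output, ot.Greater(), 1.0)

algo_MC = ot.MonteCarlo(event)
algo_MC.setMaximumOuterSampling(10000)
algo_MC.setMaximumCoefficientOfVariation(-1)  # stop on the sampling size only
algo_MC.run()
print(algo_MC.getResult().getProbabilityEstimate())  # about P(X > 0.5), i.e. 0.31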
Example #14
 def test_Model(self):
     input_dim = 3
     output_dim = 2
     count = 30
     inputs = np.random.random((count, input_dim))
     outputs = [[self.sphere(x), self.rastrigin(x)] for x in inputs]
     # p7core model
     p7_model = gtapprox.Builder().build(inputs, outputs)
     # p7ot model function
     model_function = ModelFunction(p7_model)
     # ot function
     ot_function = ot.NumericalMathFunction(model_function)
     self.assertEqual(ot_function.getInputDimension(), input_dim)
     self.assertEqual(ot_function.getOutputDimension(), output_dim)
     # Compare the result function and p7core model outputs
     for x in inputs:
         self.assertTrue(np.array_equal(p7_model.calc(x), ot_function(x)))
Example #15
 def __new__(cls, p7_model):
     if not isinstance(p7_model, gtapprox.Model):
         raise TypeError('No p7 model given. Expected ' +
                         str(gtapprox.Model) + ' object')
     # Create an intermediate function to fill execution methods for NumericalMathFunction
     ot_python_function = ot.OpenTURNSPythonFunction(
         p7_model.size_x, p7_model.size_f)
     ot_python_function._exec = p7_model.calc
     ot_python_function._exec_sample = p7_model.calc
     result = ot.NumericalMathFunction(ot_python_function)
     # Gradient object can't be passed directly
     # Here the required gradient methods are implemented manually
     gradient_implementation = _Gradient(p7_model.size_x, p7_model.size_f,
                                         p7_model)
     result.getGradient = lambda: gradient_implementation
     result.gradient = gradient_implementation.gradient
     result.getGradientCallsNumber = gradient_implementation.getCallsNumber
     result.setGradient(gradient_implementation)
     return result
Example #16
    def build(self, dataX, dataY):
        logLikelihood = ot.NumericalMathFunction(ReducedLogLikelihood(dataX, dataY))
        xlb = np.linspace(self.lambdaMin_, self.lambdaMax_, num=500)
        lambdax = [logLikelihood([x])[0] for x in xlb]
        algo = ot.TNC(logLikelihood)
        algo.setStartingPoint([xlb[np.array(lambdax).argmax()]])
        algo.setBoundConstraints(ot.Interval(self.lambdaMin_, self.lambdaMax_))
        algo.setOptimizationProblem(ot.BoundConstrainedAlgorithmImplementationResult.MAXIMIZATION)
        algo.run()
        optimalLambda = algo.getResult().getOptimizer()[0]

        # graph
        optimalLogLikelihood = algo.getResult().getOptimalValue()
        graph = logLikelihood.draw(0.01 * optimalLambda, 10.0 * optimalLambda)
        c = ot.Cloud([[optimalLambda, optimalLogLikelihood]])
        c.setColor("red")
        c.setPointStyle("circle")
        graph.add(c)
        return ot.BoxCoxTransform([optimalLambda]), graph
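
The BoxCoxTransform returned on the last line is itself a function that can be applied to positive-valued samples, and its inverse is available for the back-transformation. A short illustrative sketch with an arbitrary lambda value:

import openturns as ot

transform = ot.BoxCoxTransform([0.3])       # e.g. the lambda found by build()
sample = ot.Normal(10.0, 1.0).getSample(5)  # (almost surely) positive data
transformed = transform(sample)
restored = transform.getInverse()(transformed)
print(transformed)
print(restored)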
Example #17
    def test_gtopt(self):
        # Schaffer function
        objective = ot.NumericalMathFunction(
            ['x', 'y'],
            ['0.5 + ((sin(x^2-y^2))^2-0.5)/((1+0.001*(x^2+y^2))^2)'])
        lb = [-10] * objective.getInputDimension()
        ub = [10] * objective.getInputDimension()
        # Solve by p7ot.GTOpt
        problem = ot.OptimizationProblem()
        problem.setObjective(objective)
        problem.setBounds(ot.Interval(lb, ub))
        solver = GTOpt(problem)
        solver.setStartingPoint(ub)
        solver.run()
        ot_result = solver.getResult()

        # Solve by p7core.gtopt.Solver

        class Problem(gtopt.ProblemGeneric):
            def prepare_problem(self):
                for i in range(objective.getInputDimension()):
                    self.add_variable(bounds=(lb[i], ub[i]),
                                      initial_guess=ub[i])
                self.add_objective()

            def evaluate(self, queryx, querymask):
                functions_batch = []
                output_masks_batch = []
                for x, mask in zip(queryx, querymask):
                    functions_batch.append(objective(x))
                    output_masks_batch.append(mask)
                return functions_batch, output_masks_batch

        p7_problem = Problem()
        p7_result = gtopt.Solver().solve(problem=p7_problem)
        # Compare results
        self.assertEqual(ot_result.getOptimalValue(), p7_result.optimal.f[0])
        self.assertEqual(ot_result.getOptimalPoint(), p7_result.optimal.x[0])
        self.assertEqual(ot_result.getIterationNumber(),
                         len(p7_problem.history))
Example #18
 def test_gtdoe(self):
     # Experiment implementation
     p7ot_experiment = Sequence(count=10, bounds=ot.Interval(3))
     p7ot_result = p7ot_experiment.generate()
     if LooseVersion(
             ot.__version__) >= LooseVersion('1.8') and LooseVersion(
                 ot.__version__) != LooseVersion('1.8rc1'):
         myExperiment = ot.Experiment(p7ot_experiment)
         myExperimentImplementation = myExperiment.getImplementation()
         mySecondExperiment = ot.Experiment()
         mySecondExperiment.setImplementation(myExperimentImplementation)
         self.assertEqual(p7ot_result, mySecondExperiment.generate())
     else:
         with self.assertRaises(NotImplementedError):
             ot.Experiment(p7ot_experiment)
     # Using the NumericalMathFunction in the AdaptiveBlackbox DoE
     function = ot.NumericalMathFunction(['x1', 'x2'], ['x1^2 + x2^2'])
     experiment = AdaptiveBlackbox(count=10,
                                   bounds=ot.Interval(2),
                                   blackbox=function)
     experiment.generate()
     self.assertTrue(
         isinstance(experiment.getBlackbox(), ot.NumericalMathFunction))
Example #19
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot
import math as m

ot.TESTPREAMBLE()

f = ot.NumericalMathFunction(['t', 'y0', 'y1'], ['dy0', 'dy1'],
                             ['t - y0', 'y1 + t^2'])
phi = ot.TemporalFunction(f)
solver = ot.RungeKutta(phi)
print('ODE solver=', solver)
initialState = [1.0, -1.0]
nt = 100
timeGrid = list(map(lambda i: (i**2.0) / (nt - 1.0)**2.0, range(nt)))
print('time grid=', ot.NumericalPoint(timeGrid))
result = solver.solve(initialState, timeGrid)
print('result=', result)
print('last value=', result[nt - 1])
t = timeGrid[nt - 1]
ref = ot.NumericalPoint(2)
ref[0] = -1.0 + t + 2.0 * m.exp(-t)
ref[1] = -2.0 + -2.0 * t - t * t + m.exp(t)
print('ref. value=', ref)
Example #20
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

basisSize = 3
sampleSize = 3

X = ot.NumericalSample(sampleSize, 1)
for i in range(sampleSize):
    X[i, 0] = i + 1.0

Y = ot.NumericalSample(sampleSize, 1)

phis = []
for j in range(basisSize):
    phis.append(ot.NumericalMathFunction(['x'], ['y'], ['x^' + str(j + 1)]))
basis = ot.Basis(phis)
for i in range(basisSize):
    print(ot.NumericalMathFunctionCollection(basis)[i](X))

proxy = ot.DesignProxy(X, basis)
full = range(basisSize)

design = proxy.computeDesign(full)
print(design)

proxy.setWeight([0.5] * sampleSize)
design = proxy.computeDesign(full)
print(design)
Example #21
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

levelFunction = ot.NumericalMathFunction(["x1", "x2", "x3", "x4"], ["y1"],
                                         ["x1+2*x2-3*x3+4*x4"])
# Add a finite difference gradient to the function, as the Abdo-Rackwitz
# algorithm needs it
myGradient = ot.NonCenteredFiniteDifferenceGradient(
    1e-7, levelFunction.getEvaluation())
print("myGradient = ", repr(myGradient))
# Substitute the gradient
levelFunction.setGradient(ot.NonCenteredFiniteDifferenceGradient(myGradient))
startingPoint = [0.0] * 4
algo = ot.AbdoRackwitz(ot.OptimizationProblem(levelFunction, 3.0))
algo.setStartingPoint(startingPoint)
algo.run()
print("result = ", algo.getResult())

levelFunction = ot.NumericalMathFunction(["x1", "x2", "x3", "x4"], ["y1"],
                                         ["x1*cos(x1)+2*x2*x3-3*x3+4*x3*x4"])
# Add a finite difference gradient to the function, as the Abdo-Rackwitz
# algorithm needs it
myGradient = ot.NonCenteredFiniteDifferenceGradient(
    1e-7, levelFunction.getEvaluation())
print("myGradient = ", repr(myGradient))
# Substitute the gradient
levelFunction.setGradient(ot.NonCenteredFiniteDifferenceGradient(myGradient))
startingPoint = [0.0] * 4
algo = ot.AbdoRackwitz(ot.OptimizationProblem(levelFunction, -0.5))
Example #22
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View

distX = ot.Normal(0.0, 10.0)
myFunc = ot.NumericalMathFunction('x', 'x+sin(x)')
distFin = ot.CompositeDistribution(myFunc, distX)

graphPDF = distFin.drawPDF(1024)
graphPDF.setXTitle('x')
graphPDF.setLegendPosition('')
graphCDF = distFin.drawCDF(1024)
graphCDF.setXTitle('x')
graphCDF.setLegendPosition('')

fig = plt.figure(figsize=(8, 4))
plt.suptitle(
    "CompositeDistribution: f(x)=x+sin(x); L=Normal(0.0, 10.0): pdf and cdf")
pdf_axis = fig.add_subplot(121)
cdf_axis = fig.add_subplot(122)
pdf_axis.set_xlim(auto=True)
cdf_axis.set_xlim(auto=True)

View(graphPDF, figure=fig, axes=[pdf_axis], add_legend=True)
View(graphCDF, figure=fig, axes=[cdf_axis], add_legend=True)
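
Beyond drawing the PDF and CDF, the composite distribution behaves like any other OpenTURNS distribution, so moments, quantiles and samples are available directly; a short illustrative sketch:

import openturns as ot

myFunc = ot.NumericalMathFunction('x', 'x+sin(x)')
distFin = ot.CompositeDistribution(myFunc, ot.Normal(0.0, 10.0))
print(distFin.getMean())
print(distFin.computeQuantile(0.95))
print(distFin.getSample(3))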
Example #23
import sys
import openturns as ot

ot.TESTPREAMBLE()
ot.RandomGenerator.SetSeed(0)


def progress(percent):
    sys.stderr.write('-- progress=' + str(percent) + '%\n')


def stop():
    sys.stderr.write('-- stop?\n')
    return False


# We create a numerical math function
myFunction = ot.NumericalMathFunction(["E", "F", "L", "I"], ["d"],
                                      ["-F*L^3/(3*E*I)"])

dim = myFunction.getInputDimension()

# We create a normal distribution of dimension dim
mean = [0.0] * dim
# E
mean[0] = 50.0
# F
mean[1] = 1.0
# L
mean[2] = 10.0
# I
mean[3] = 5.0
sigma = [1.0] * dim
R = ot.IdentityMatrix(dim)
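
The snippet stops before the input distribution and the event are assembled. Below is a hedged sketch of how such a setup is commonly continued, building the normal distribution from the mean and sigma above and estimating an exceedance probability by Monte Carlo; the threshold is illustrative and is not taken from the original script.

import openturns as ot

myFunction = ot.NumericalMathFunction(["E", "F", "L", "I"], ["d"],
                                      ["-F*L^3/(3*E*I)"])
dim = myFunction.getInputDimension()
mean = [50.0, 1.0, 10.0, 5.0]
sigma = [1.0] * dim
R = ot.IdentityMatrix(dim)
distribution = ot.Normal(mean, sigma, R)

# event "deflection below a threshold" (the -1.5 value is illustrative)
vect = ot.RandomVector(distribution)
output = ot.RandomVector(myFunction, vect)
event = ot.Event(output, ot.Less(), -1.5)
algo = ot.MonteCarlo(event)
algo.setMaximumOuterSampling(10000)
algo.run()
print(algo.getResult().getProbabilityEstimate())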
Example #24
    def __init__(self,
                 inputDOE,
                 outputDOE,
                 physicalModel=None,
                 nMorePoints=0,
                 detection=None,
                 noiseThres=None,
                 saturationThres=None):

        # initialize the POD class
        boxCox = False
        super(AdaptiveHitMissPOD,
              self).__init__(inputDOE, outputDOE, detection, noiseThres,
                             saturationThres, boxCox)
        # inherited attributes
        # self._simulationSize
        # self._detection
        # self._inputSample
        # self._outputSample
        # self._noiseThres
        # self._saturationThres
        # self._lambdaBoxCox
        # self._boxCox
        # self._size
        # self._dim
        # self._censored

        self._distribution = None
        self._classifierType = 'rf'  # random forest classifier or svc
        self._ClassifierParameters = [[100], [None], [2], [0]]
        self._classifierModel = None
        self._confMat = None
        self._pmax = 0.52
        self._pmin = 0.45
        self._initialStartSize = 1000
        self._samplingSize = 10000  # Number of MC simulations to compute POD
        self._candidateSize = 5000
        self._nMorePoints = nMorePoints
        self._verbose = True
        self._graph = False  # flag to plot or not the POD curves at each iteration
        self._probabilityLevel = None  # default graph option
        self._confidenceLevel = None  # default graph option
        self._graphDirectory = None  # graph directory for saving

        self._normalDist = ot.Normal()

        if self._censored:
            logging.info('Censored data are not taken into account : the ' + \
                         'kriging model is only built on filtered data.')

        # Run the preliminary run of the POD class
        result = self._run(self._inputSample, self._outputSample,
                           self._detection, self._noiseThres,
                           self._saturationThres, self._boxCox, self._censored)

        # get some results
        self._input = result['inputSample']
        self._signals = result['signals']
        self._detectionBoxCox = result['detectionBoxCox']
        self._boxCoxTransform = result['boxCoxTransform']

        # define the defect sizes for the interpolation function if not defined
        self._defectNumber = 20
        self._defectSizes = np.linspace(self._input[:, 0].getMin()[0],
                                        self._input[:, 0].getMax()[0],
                                        self._defectNumber)

        if detection is None:
            # case where the physical model already returns 0 or 1
            self._physicalModel = physicalModel
        else:
            # case where the physical model returns a true signal value
            # the physical model is turned into a binary model with respect
            # to the detection value.
            self._physicalModel = ot.NumericalMathFunction(
                physicalModel, ot.Greater(), self._detection)
            self._signals = np.array(np.array(self._signals) > self._detection,
                                     dtype='int')
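
The NumericalMathFunction(model, Greater(), threshold) form used in the else branch builds the indicator function of model(x) > threshold, returning 1.0 when the signal exceeds the detection value and 0.0 otherwise. A small illustrative sketch:

import openturns as ot

signalModel = ot.NumericalMathFunction(['x'], ['2*x'])
detection = 1.0
binaryModel = ot.NumericalMathFunction(signalModel, ot.Greater(), detection)
print(binaryModel([0.3]))  # [0] : 2*0.3 is below the detection threshold
print(binaryModel([0.8]))  # [1] : 2*0.8 exceeds it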
Example #25
size = 100
inputDimension = 6
inputSample = ot.Normal(inputDimension).getSample(size)
inputVar = ot.Description(inputDimension)
for i in range(inputDimension):
    inputVar[i] = 'X' + str(i)
inputSample.setDescription(inputVar)
formula = ot.Description(1)
expression = ''
for i in range(inputDimension):
    if i > 0:
        expression += '+'
    expression += 'cos(' + str(i + 1) + '*' + inputVar[i] + ')'
formula[0] = expression
outputVar = ot.Description(1)
outputVar[0] = 'y'
model = ot.NumericalMathFunction(inputVar, outputVar, formula)
outputSample = model(inputSample)

cobweb = ot.VisualTest_DrawCobWeb(inputSample, outputSample, 2.5, 3.0, 'red',
                                  False)

bb = cobweb.getBoundingBox()
# define the increasing factor of the bounding box
factor = 1.1
bb[1] = factor * bb[1]
cobweb.setBoundingBox(bb)

View(cobweb, figure_kwargs={'figsize': (10, 4)})
Example #26
    # Set precision
    ot.PlatformInfo.SetNumericalPrecision(3)
    ot.ResourceMap.Set("GeneralizedLinearModelAlgorithm-LinearAlgebra", "HMAT")

    # Test 1
    print("========================")
    print("Test standard using HMat")
    print("========================")
    sampleSize = 6
    spatialDimension = 1

    # Create the function to estimate
    input_description = ["x0"]
    foutput = ["f0"]
    formulas = ["x0"]
    model = ot.NumericalMathFunction(input_description, foutput, formulas)

    X = ot.NumericalSample(sampleSize, spatialDimension)
    X2 = ot.NumericalSample(sampleSize, spatialDimension)
    for i in range(sampleSize):
        X[i, 0] = 3.0 + i
        X2[i, 0] = 2.5 + i
    X[0, 0] = 1.0
    X[1, 0] = 3.0
    X2[0, 0] = 2.0
    X2[1, 0] = 4.0
    Y = model(X)
    # Data validation
    Y2 = model(X2)
    for i in range(sampleSize):
        # Add a small noise to data
Example #27
    for i in range(point.getDimension()):
        if i == 0:
            sep = ""
        else:
            sep = ","
        if m.fabs(point[i]) < eps:
            oss += sep + format % m.fabs(point[i])
        else:
            oss += sep + format % point[i]
        sep = ","
    oss += "]"
    return oss


# bounds
linear = ot.NumericalMathFunction(
    ['x1', 'x2', 'x3', 'x4'], ['y1'], ['x1+2*x2-3*x3+4*x4'])

dim = 4
startingPoint = [0.0] * dim

bounds = ot.Interval([-3.] * dim, [5.] * dim)
algoNames = ot.NLopt.GetAlgorithmNames()

for algoName in algoNames:

    # STOGO might not be enabled
    # NEWUOA nan/-nan
    # COBYLA crashes on squeeze
    # ESCH not same results with 2.4.1
    if 'STOGO' in algoName or 'NEWUOA' in algoName or 'COBYLA' in algoName or 'ESCH' in algoName:
        print('-- Skipped: algo=', algoName)
Example #28
    for i in range(point.getDimension()):
        if i == 0:
            sep = ""
        else:
            sep = ","
        if m.fabs(point[i]) < eps:
            oss += sep + format % m.fabs(point[i])
        else:
            oss += sep + format % point[i]
        sep = ","
    oss += "]"
    return oss


# bounds
linear = ot.NumericalMathFunction(
    ['x1', 'x2', 'x3', 'x4'], ['y1'], ['x1+2*x2-3*x3+4*x4'])

dim = 4
startingPoint = [0.] * dim

bounds = ot.Interval([-3.] * dim, [5.] * dim)

for algo in [ot.SLSQP(), ot.LBFGS(), ot.NelderMead()]:
    for minimization in [True, False]:
        for inequality in [True, False]:
            for equality in [True, False]:
                problem = ot.OptimizationProblem(
                    linear, ot.NumericalMathFunction(),
                    ot.NumericalMathFunction(), bounds)
                problem.setMinimization(minimization)
                if inequality:
                    # x3 <= x1
                    problem.setInequalityConstraint(ot.NumericalMathFunction(
                        ['x1', 'x2', 'x3', 'x4'], ['ineq'], ['x1-x3']))
Example #29
    for i in range(point.getDimension()):
        if i == 0:
            sep = ""
        else:
            sep = ","
        if m.fabs(point[i]) < eps:
            oss += sep + format % m.fabs(point[i])
        else:
            oss += sep + format % point[i]
        sep = ","
    oss += "]"
    return oss


# linear
levelFunction = ot.NumericalMathFunction(["x1", "x2", "x3", "x4"], ["y1"],
                                         ["x1+2*x2-3*x3+4*x4"])
startingPoint = ot.NumericalPoint(4, 0.0)
algo = ot.Cobyla(ot.OptimizationProblem(levelFunction, 3.0))
algo.setStartingPoint(startingPoint)
print('algo=', algo)
algo.run()
result = algo.getResult()
print('x^=', printNumericalPoint(result.getOptimalPoint(), 4))

# non-linear
levelFunction = ot.NumericalMathFunction(["x1", "x2", "x3", "x4"], ["y1"],
                                         ["x1*cos(x1)+2*x2*x3-3*x3+4*x3*x4"])
startingPoint = ot.NumericalPoint(4, 0.0)
algo = ot.Cobyla(ot.OptimizationProblem(levelFunction, 3.0))
algo.setStartingPoint(startingPoint)
algo.setMaximumIterationNumber(400)
Example #30
import openturns as ot
import otmorris
from otmorris.plot_sensitivity import PlotEE
# Number of trajectories
r = 10
# Define the experiment in [0,1]^20
# number of levels per input
p = 5
morris_experiment = otmorris.MorrisExperiment([p] * 20, r)
X = morris_experiment.generate()
f = ot.NumericalMathFunction(otmorris.MorrisFunction())
Y = f(X)
# Evaluate Elementary effects (ee)
ee = otmorris.Morris(X, Y)
# Compute mu/sigma
mean = ee.getMeanAbsoluteElementaryEffects()
sigma = ee.getStandardDeviationElementaryEffects()
fig = PlotEE(ee)
fig.show()