def create_monte_carlo(model, inputRandomVector, coefficient_variation, threshold=30):
    """Create a Monte Carlo algorithm for a threshold-exceedance event.

    Parameters
    ----------
    model : OpenTURNS Function.
    inputRandomVector : OpenTURNS RandomVector, vector of random inputs.
    coefficient_variation : Float, target for the coefficient of variation of
        the estimator.
    threshold : Float, failure threshold on the model output (default 30);
        the event of interest is {model(X) > threshold}.

    Returns
    -------
    myAlgoMonteCarlo : OpenTURNS ProbabilitySimulationAlgorithm, configured
        and ready to run.
    """
    outputVariableOfInterest = ot.CompositeRandomVector(model, inputRandomVector)
    # Create an Event from this RandomVector
    myEvent = ot.ThresholdEvent(outputVariableOfInterest, ot.Greater(), threshold)
    myEvent.setName("Deviation > %g cm" % threshold)

    # Create a Monte Carlo algorithm
    experiment = ot.MonteCarloExperiment()
    myAlgoMonteCarlo = ot.ProbabilitySimulationAlgorithm(myEvent, experiment)
    myAlgoMonteCarlo.setBlockSize(100)
    myAlgoMonteCarlo.setMaximumCoefficientOfVariation(coefficient_variation)
    return myAlgoMonteCarlo
def _runMonteCarlo(self, defect):
    """Run a crude Monte Carlo simulation for one given defect size.

    The metamodel's first input is frozen at the defect value, the
    detection event is built in the Box-Cox space, and the simulation
    always runs the full sampling size (the CoV criterion is disabled).
    """
    # Freeze the first input of the metamodel at the given defect value.
    parametricModel = ot.NumericalMathFunction(self._metamodel, [0], [defect])
    parametricModel.enableHistory()
    parametricModel.clearHistory()
    parametricModel.clearCache()

    outputVector = ot.RandomVector(parametricModel,
                                   ot.RandomVector(self._distribution))
    # Detection event in the (possibly Box-Cox transformed) signal space.
    failureEvent = ot.Event(outputVector, ot.Greater(), self._detectionBoxCox)

    # Monte Carlo: a negative target CoV disables that stopping criterion,
    # so the sampling size is the only stopping rule.
    simulation = ot.MonteCarlo(failureEvent)
    simulation.setMaximumOuterSampling(self._samplingSize)
    simulation.setMaximumCoefficientOfVariation(-1)
    simulation.run()
    return simulation.getResult()
# Stream out the result print('algo result=', myAlgo.getResult()) print('probability distribution=', myAlgo.getResult().getProbabilityDistribution()) print('-' * 32) ot.RandomGenerator.SetSeed(0) description = ot.Description() description.add('composite vector/comparison event') dim = 2 distribution = ot.Normal(dim) Xvector = ot.RandomVector(distribution) f = ot.SymbolicFunction(['x0', 'x1'], ['x0+x1']) Yvector = ot.CompositeRandomVector(f, Xvector) s = 1.0 event1 = ot.ThresholdEvent(Yvector, ot.Greater(), s) description.add('composite vector/domain event') domain1D = ot.LevelSet(ot.SymbolicFunction( ['x0'], ['sin(x0)']), ot.LessOrEqual(), -0.5) event2 = ot.DomainEvent(Yvector, domain1D) description.add('composite vector/interval event') interval = ot.Interval(0.5, 1.5) event3 = ot.ThresholdEvent(Yvector, interval) description.add('process/domain event') Xprocess = ot.WhiteNoise(distribution, ot.RegularGrid(0.0, 0.1, 10)) domain2D = ot.LevelSet( ot.SymbolicFunction(['x0', 'x1'], ['(x0-1)^2+x1^2']), ot.LessOrEqual(), 1.0) event4 = ot.ProcessEvent(Xprocess, domain2D) all_events = [event1, event2, event3, event4] for i, event in enumerate(all_events): print(description[i])
def __init__(self, inputDOE, outputDOE, physicalModel=None, nMorePoints=0,
             detection=None, noiseThres=None, saturationThres=None):
    """Build the adaptive hit/miss POD object and run the preliminary study.

    Parameters
    ----------
    inputDOE : 2-d sequence of float
        Input design of experiments.
    outputDOE : 2-d sequence of float
        Output values matching ``inputDOE`` (signals, or hit/miss 0-1
        values when ``detection`` is None).
    physicalModel : OpenTURNS function, optional
        Physical model used to enrich the design (default None).
    nMorePoints : int
        Number of points to add adaptively (default 0).
    detection : float, optional
        Detection threshold; if None the outputs are assumed to already
        be binary hit/miss values.
    noiseThres, saturationThres : float, optional
        Signal filtering thresholds forwarded to the base POD class.
    """
    # initialize the POD class (Box-Cox transform disabled for hit/miss data)
    boxCox = False
    super(AdaptiveHitMissPOD, self).__init__(inputDOE, outputDOE,
                                             detection, noiseThres,
                                             saturationThres, boxCox)
    # inherited attributes
    # self._simulationSize
    # self._detection
    # self._inputSample
    # self._outputSample
    # self._noiseThres
    # self._saturationThres
    # self._lambdaBoxCox
    # self._boxCox
    # self._size
    # self._dim
    # self._censored

    self._distribution = None
    self._classifierType = 'rf'  # random forest classifier or svc
    self._ClassifierParameters = [[100], [None], [2], [0]]
    self._classifierModel = None
    self._confMat = None  # confusion matrix of the classifier
    self._pmax = 0.52
    self._pmin = 0.45
    self._initialStartSize = 1000
    self._samplingSize = 10000  # Number of MC simulations to compute POD
    self._candidateSize = 5000
    self._nMorePoints = nMorePoints
    self._verbose = True
    self._graph = False  # flag to print or not the POD curves at each iteration
    self._probabilityLevel = None  # default graph option
    self._confidenceLevel = None  # default graph option
    self._graphDirectory = None  # graph directory for saving

    self._normalDist = ot.Normal()

    if self._censored:
        logging.info('Censored data are not taken into account : the ' +
                     'kriging model is only built on filtered data.')

    # Run the preliminary run of the POD class
    result = self._run(self._inputSample, self._outputSample, self._detection,
                       self._noiseThres, self._saturationThres, self._boxCox,
                       self._censored)

    # get some results
    self._input = result['inputSample']
    self._signals = result['signals']
    self._detectionBoxCox = result['detectionBoxCox']
    self._boxCoxTransform = result['boxCoxTransform']

    # define the defect sizes for the interpolation function if not defined
    self._defectNumber = 20
    self._defectSizes = np.linspace(self._input[:, 0].getMin()[0],
                                    self._input[:, 0].getMax()[0],
                                    self._defectNumber)

    if detection is None:
        # case where the physical model already returns 0 or 1
        self._physicalModel = physicalModel
    else:
        # case where the physical model returns a true signal value
        # the physical model is turned into a binary model with respect
        # to the detection value.
        self._physicalModel = ot.NumericalMathFunction(
            physicalModel, ot.Greater(), self._detection)
        self._signals = np.array(np.array(self._signals) > self._detection,
                                 dtype='int')
mycopula = ot.NormalCopula(R) # %% # And we endly create the composed input probability distribution. inputDistribution = ot.ComposedDistribution([E, F, L, I], mycopula) inputDistribution.setDescription(("E", "F", "L", "I")) # %% # Create the event whose probability we want to estimate: inputRandomVector = ot.RandomVector(inputDistribution) outputVariableOfInterest = ot.CompositeRandomVector(model_fmu, inputRandomVector) threshold = 30 event = ot.ThresholdEvent(outputVariableOfInterest, ot.Greater(), threshold) event.setName("Deviation > %g cm" % threshold) # %% # Parameterize and run the Monte Carlo algorithm: ot.RandomGenerator.SetSeed(23091926) # set seed for reproducibility experiment = ot.MonteCarloExperiment() algo = ot.ProbabilitySimulationAlgorithm(event, experiment) algo.setMaximumOuterSampling(200) algo.setMaximumCoefficientOfVariation(0.2) algo.run() # %% # Draw the distribution of threshold excedance probability:
# %% # We consider three functions `f1`, `f2` and `f3` : f1 = ot.SymbolicFunction(['x0', 'x1'], ['x0']) f2 = ot.SymbolicFunction(['x0', 'x1'], ['x1']) f3 = ot.SymbolicFunction(['x0', 'x1'], ['x0+x1']) # %% # We build :class:`~openturns.CompositeRandomVector` from these functions and the initial distribution. Y1 = ot.CompositeRandomVector(f1, X) Y2 = ot.CompositeRandomVector(f2, X) Y3 = ot.CompositeRandomVector(f3, X) # %% # We define three basic events :math:`E_1=\{(x_0,x_1)~:~x_0 < 0 \}`, :math:`E_2=\{(x_0,x_1)~:~x_1 > 0 \}` and :math:`E_3=\{(x_0,x_1)~:~x_0+x_1>0 \}`. e1 = ot.ThresholdEvent(Y1, ot.Less(), 0.0) e2 = ot.ThresholdEvent(Y2, ot.Greater(), 0.0) e3 = ot.ThresholdEvent(Y3, ot.Greater(), 0.0) # %% # The restriction of the domain :math:`E_1` to :math:`[-4,4] \times [-4, 4]` is the grey area. myGraph = ot.Graph(r'Representation of the event $E_1$', r'$x_1$', r'$x_2$', True, '') data = [[-4, -4], [0, -4], [0, 4], [-4, 4]] myPolygon = ot.Polygon(data) myPolygon.setColor('grey') myPolygon.setEdgeColor('black') myGraph.add(myPolygon) view = otv.View(myGraph) axes = view.getAxes() _ = axes[0].set_xlim(-4.0, 4.0) _ = axes[0].set_ylim(-4.0, 4.0)
model = ot.SymbolicFunction(["x1", "x2"], ["x1+2*x2"]) # Create the input distribution and random vector X inputDist = ot.Normal(2) inputDist.setDescription(['X1', 'X2']) inputVector = ot.RandomVector(inputDist) # Create the output random vector Y=model(X) output = ot.CompositeRandomVector(model, inputVector) output.setName("MyOutputY") # %% # Create the physical event Y > 4 threshold = 4 myEvent = ot.ThresholdEvent(output, ot.Greater(), threshold) # Create the associated standard event in the standard space myStandardEvent = ot.StandardEvent(myEvent) # %% # First : FORM analyses to get the design point myCobyla = ot.Cobyla() myStartingPoint = inputDist.getMean() myAlgoFORM = ot.FORM(myCobyla, myEvent, myStartingPoint) myAlgoFORM.run() FORMResult = myAlgoFORM.getResult() standardSpaceDesignPoint = FORMResult.getStandardSpaceDesignPoint() # %% # Fix the importance level epsilon of the test
# %% # Create a model model = ot.SymbolicFunction(['x1', 'x2'], ['x1^2+x2']) R = ot.CorrelationMatrix(2) R[0, 1] = -0.6 inputDist = ot.Normal([0., 0.], R) inputDist.setDescription(['X1', 'X2']) inputVector = ot.RandomVector(inputDist) # Create the output random vector Y=model(X) Y = ot.CompositeRandomVector(model, inputVector) # Create the event Y > 4 threshold = 4.0 event = ot.ThresholdEvent(Y, ot.Greater(), threshold) # %% # Create a FORM algorithm solver = ot.Cobyla() startingPoint = inputDist.getMean() algo = ot.FORM(solver, event, startingPoint) # Run the algorithm and retrieve the result algo.run() result_form = algo.getResult() # %% # Create the post analytical importance sampling simulation algorithm algo = ot.PostAnalyticalImportanceSampling(result_form) algo.run()
from __future__ import print_function
import openturns as ot

ot.TESTPREAMBLE()

# Uncertain parameters: correlated 3-d normal
distribution = ot.Normal([1.0] * 3, [2.0] * 3, ot.CorrelationMatrix(3))
distribution.setName("Unnamed")
# Model
f = ot.SymbolicFunction(["x", "y", "z"], ["x-1.5*y+2*z"])
# Sampling: common input/output sample reused by every analysis below
size = 100
inputSample = distribution.getSample(size)
outputSample = f(inputSample)
comparisonOperators = [ot.Less(), ot.LessOrEqual(),
                       ot.Greater(), ot.GreaterOrEqual()]
threshold = 3.0
ot.ResourceMap.SetAsUnsignedInteger(
    "SimulationSensitivityAnalysis-DefaultSampleMargin", 10)
# run the sensitivity analysis once per comparison operator
for i in range(4):
    # Analysis based on an event
    X = ot.RandomVector(distribution)
    Y = ot.CompositeRandomVector(f, X)
    event = ot.ThresholdEvent(Y, comparisonOperators[i], threshold)
    algo = ot.SimulationSensitivityAnalysis(event, inputSample, outputSample)
    print("algo=", algo)
    # Perform the analysis
    print("Mean point in event domain=",
          algo.computeMeanPointInEventDomain())
    print("Importance factors at threshold ", threshold, " =",
          algo.computeImportanceFactors())
from __future__ import print_function
import openturns as ot

ot.TESTPREAMBLE()

model = ot.SymbolicFunction(['x0', 'x1', 'x2', 'x3'],
                            ['-(6+x0^2-x1+x2+3*x3)'])
dim = model.getInputDimension()
# identical normal marginals with a mixed (Clayton x Normal) copula
marginals = [ot.Normal(5.0, 3.0) for i in range(dim)]
distribution = ot.ComposedDistribution(
    marginals, ot.ComposedCopula([ot.ClaytonCopula(), ot.NormalCopula()]))
#distribution = ot.Normal([5]*dim, [3]*dim, ot.CorrelationMatrix(dim))
#distribution = ot.ComposedDistribution(marginals, ot.IndependentCopula(dim))
distribution.setDescription(['marginal' + str(i) for i in range(dim)])
vect = ot.RandomVector(distribution)
output = ot.CompositeRandomVector(model, vect)
# failure event {model(X) > 0}
event = ot.ThresholdEvent(output, ot.Greater(), 0.0)
solver = ot.Cobyla()
solver.setMaximumEvaluationNumber(1000)
solver.setMaximumAbsoluteError(1.0e-10)
solver.setMaximumRelativeError(1.0e-10)
solver.setMaximumResidualError(1.0e-10)
solver.setMaximumConstraintError(1.0e-10)
# FORM started from the input mean
algo = ot.FORM(solver, event, distribution.getMean())
algo.run()
result = algo.getResult()
# sensitivity of the Hasofer reliability index w.r.t. distribution parameters
hasoferReliabilityIndexSensitivity = result.getHasoferReliabilityIndexSensitivity(
)
print(hasoferReliabilityIndexSensitivity)
X = ot.RandomVector(distribution) # %% # Define some basic events E1, E2 and E3. # %% f1 = ot.SymbolicFunction(['x0', 'x1'], ['x0']) f2 = ot.SymbolicFunction(['x0', 'x1'], ['x1']) f3 = ot.SymbolicFunction(['x0', 'x1'], ['x0+x1']) Y1 = ot.CompositeRandomVector(f1, X) Y2 = ot.CompositeRandomVector(f2, X) Y3 = ot.CompositeRandomVector(f3, X) e1 = ot.ThresholdEvent(Y1, ot.Less(), 0.0) # E1 <=> x0<0 e2 = ot.ThresholdEvent(Y2, ot.Greater(), 0.0) # E2 <=> x1>0 e3 = ot.ThresholdEvent(Y3, ot.Greater(), 0.0) # E3 <=> x0+x1>0 # %% # Define the intersection E3=E1 AND E2. # %% e4 = ot.IntersectionEvent([e1, e2]) # %% # Approximate probability of that event: :math:`\approx 1/4`. # %% e4.getSample(10000).computeMean() # %%
# %% # Create model f(x) = x1 + 2*x2 model = ot.SymbolicFunction(['x1', 'x2'], ['x1+2*x2']) # Create the input distribution and random vector X inputDist = ot.Normal(2) inputDist.setDescription(['X1', 'X2']) inputVector = ot.RandomVector(inputDist) # Create the output random vector Y=f(X) outputVector = ot.CompositeRandomVector(model, inputVector) # %% # Create the event Y > 3 threshold = 3.0 event = ot.ThresholdEvent(outputVector, ot.Greater(), threshold) # %% # Realization as a Bernoulli print('realization=', event.getRealization()) # %% # Sample of 10 realizations as a Bernoulli print('sample=', event.getSample(10)) # %% # Build a standard event based on an event standardEvent = ot.StandardEvent(event)
myStudy.add(monteCarlo) # 2-b Taylor Expansion ## taylor = persalys.TaylorExpansionMomentsAnalysis('Taylor', model1) taylor.setInterestVariables(['y0', 'y1']) myStudy.add(taylor) # 2-c Taylor Expansion which generate an error taylor2 = persalys.TaylorExpansionMomentsAnalysis('Taylor2', model1) taylor2.setInterestVariables(['fake_var']) myStudy.add(taylor2) # 3- reliability ## # limit state ## limitState = persalys.LimitState('aLimitState', model1, 'y1', ot.Greater(), 0.5) myStudy.add(limitState) optimAlgo = ot.AbdoRackwitz() optimAlgo.setMaximumIterationNumber(150) optimAlgo.setMaximumAbsoluteError(1e-3) # 3-a Monte Carlo ## monteCarloReliability = persalys.MonteCarloReliabilityAnalysis( 'MonteCarloReliability', limitState) monteCarloReliability.setMaximumCoefficientOfVariation(-1.) monteCarloReliability.setMaximumElapsedTime(1000) monteCarloReliability.setMaximumCalls(20) monteCarloReliability.setSeed(2) myStudy.add(monteCarloReliability)
# Upstream river level: triangular distribution
Zm_law = ot.Triangular(54., 55., 56.)
coll = ot.DistributionCollection([Q_law, Ks_law, Zv_law, Zm_law])
distribution = ot.ComposedDistribution(coll)

# median point of each marginal, used as a reference model evaluation
x = [dist.computeQuantile(0.5)[0] for dist in coll]
fx = function(x)

# only the first threshold (k = 0.0) is actually run
for k in [0.0, 2.0, 5.0, 8.][0:1]:
    randomVector = ot.RandomVector(distribution)
    composite = ot.CompositeRandomVector(function, randomVector)

    print('--------------------')
    print('model flood S <', k, 'gamma=', end=' ')
    print('f(', ot.Point(x), ')=', fx)

    event = ot.ThresholdEvent(composite, ot.Greater(), k)

    # only n = 1000 and gamma1 = 0.5 are actually run
    for n in [100, 1000, 5000][1:2]:
        for gamma1 in [0.25, 0.5, 0.75][1:2]:
            # reference probability by crude Monte Carlo
            experiment = ot.MonteCarloExperiment()
            algo = ot.ProbabilitySimulationAlgorithm(event, experiment)
            algo.setMaximumOuterSampling(100 * n)
            # algo.setMaximumCoefficientOfVariation(-1.)
            algo.run()
            result = algo.getResult()
            print(result)

            # adaptive directional stratification on the same event
            algo = ot.AdaptiveDirectionalStratification(event)
            algo.setMaximumOuterSampling(n)
            algo.setGamma([gamma1, 1.0 - gamma1])
            # count the model evaluations consumed by the ADS run
            calls0 = function.getEvaluationCallsNumber()
            algo.run()
            calls = function.getEvaluationCallsNumber() - calls0
import openturns as ot

ot.TESTPREAMBLE()

# Uncertain parameters: correlated 3-d normal
distribution = ot.Normal([1.0] * 3, [2.0] * 3, ot.CorrelationMatrix(3))
distribution.setName("Unnamed")
# Model
f = ot.SymbolicFunction(["x", "y", "z"], ["x-1.5*y+2*z"])
# Sampling: common input/output sample reused by every analysis below
size = 100
inputSample = distribution.getSample(size)
outputSample = f(inputSample)
comparisonOperators = [
    ot.Less(), ot.LessOrEqual(),
    ot.Greater(), ot.GreaterOrEqual()
]
threshold = 3.0
ot.ResourceMap.SetAsUnsignedInteger(
    "SimulationSensitivityAnalysis-DefaultSampleMargin", 10)
# run the sensitivity analysis once per comparison operator
for i in range(4):
    # Analysis based on an event
    X = ot.RandomVector(distribution)
    Y = ot.CompositeRandomVector(f, X)
    event = ot.ThresholdEvent(Y, comparisonOperators[i], threshold)
    algo = ot.SimulationSensitivityAnalysis(event, inputSample, outputSample)
    print("algo=", algo)
    # Perform the analysis
    print("Mean point in event domain=",
          algo.computeMeanPointInEventDomain())
    # NOTE(review): this print statement continues beyond this chunk
    print("Importance factors at threshold ", threshold, " =",
# continuation of a TruncatedDistribution built before this chunk
ot.Normal(30.0, 7.5), 0, ot.TruncatedDistribution.LOWER)
dist_Zv = ot.Uniform(49.0, 51.0)
dist_Zm = ot.Uniform(54.0, 56.0)

# persalys inputs/output (descriptions kept in French, as in the study)
Q = persalys.Input('Q', 1000., dist_Q, 'Débit maximal annuel (m3/s)')
Ks = persalys.Input('Ks', 30., dist_Ks, 'Strickler (m^(1/3)/s)')
Zv = persalys.Input('Zv', 50., dist_Zv, 'Côte de la rivière en aval (m)')
Zm = persalys.Input('Zm', 55., dist_Zm, 'Côte de la rivière en amont (m)')
S = persalys.Output('S', 'Surverse (m)')

model = persalys.SymbolicPhysicalModel('myPhysicalModel', [Q, Ks, Zv, Zm], [
    S], ['(Q/(Ks*300.*sqrt((Zm-Zv)/5000)))^(3.0/5.0)+Zv-55.5-3.'])
myStudy.add(model)

# limit state ##
limitState = persalys.LimitState('limitState1', model, 'S', ot.Greater(), 0.)
myStudy.add(limitState)

# Monte Carlo ##
montecarlo = persalys.MonteCarloReliabilityAnalysis(
    'myMonteCarlo', limitState)
montecarlo.setMaximumCalls(10000)
myStudy.add(montecarlo)

montecarlo.run()
montecarloResult = montecarlo.getResult()

# Comparison
openturns.testing.assert_almost_equal(
    montecarloResult.getSimulationResult().getProbabilityEstimate(), 0.0, 1e-6)

# FORM-IS ##
def run_ImportanceSampling(
    event,
    pstar,
    sd=1.0,
    coefVar=0.05,
    outerSampling=1000,
    blockSize=10,
    seed=1234,
    verbose=False,
    failure_domain=None,
):
    """
    Run an importance sampling simulation.

    Parameters
    ----------
    event : openturns.Event
        The failure event or a list of failure events.
    pstar : list of points
        Design points in the standard space where to center the
        instrumental distribution.
    sd : positive float
        The standard deviation of the instrumental distribution.
    coefVar : float
        The target coefficient of variation.
    outerSampling : int
        The maximum number of outer iterations.
        Nb of iterations = outerSampling x blockSize.
    blockSize : int
        The number of samples sent to be evaluated simultaneously.
    seed : int
        Seed for the openturns random generator.
    verbose : bool
        Enable or not the display of the result.
    failure_domain : string
        Type of failure domain form : either 'union' or 'intersection'.
        Only needed if the event is a list.

    Returns
    -------
    simulation : openturns.ProbabilitySimulationAlgorithm
        The simulation algorithm after the run.
    """
    # Case with the limit state defined as an intersection or a union of
    # events: aggregate the sub-events into a single ThresholdEvent.
    if type(event) is list:
        n_event = len(event)
        antecedent = event[0].getAntecedent()

        def _indicators(X):
            """Evaluate each sub-event indicator (0/1 column) on sample X."""
            sample = ot.Sample(X.getSize(), n_event)
            for i in range(n_event):
                sample[:, i] = event[i].getFunction()(X)
            sample = np.array(sample)
            for i in range(n_event):
                op = event[i].getOperator().getImplementation().getClassName()
                if op in ("Less", "LessOrEqual"):
                    sample[:, i] = sample[:, i] < event[i].getThreshold()
                elif op in ("Greater", "GreaterOrEqual"):
                    sample[:, i] = sample[:, i] >= event[i].getThreshold()
            return sample

        if failure_domain == "union":
            # at least one sub-event occurs <=> sum of indicators > 0
            def aggregated(X):
                return np.atleast_2d(_indicators(X).sum(axis=1)).T
        elif failure_domain == "intersection":
            # all sub-events occur <=> product of indicators > 0
            def aggregated(X):
                return np.atleast_2d(_indicators(X).prod(axis=1)).T
        else:
            raise ValueError(
                "failure_domain must be 'union' or 'intersection' "
                "when event is a list, got %r" % (failure_domain,))

        model = ot.PythonFunction(
            event[0].getFunction().getInputDimension(),
            event[0].getFunction().getOutputDimension(),
            func_sample=aggregated,
        )
        output = ot.RandomVector(model, antecedent)
        # BUG FIX: the aggregated event must be bound to `new_event` for BOTH
        # forms; the union branch previously assigned `event`, leaving
        # `new_event` undefined and raising NameError below.
        new_event = ot.ThresholdEvent(output, ot.Greater(), 0.0)
    else:
        model = event.getFunction()
        new_event = event

    # Initialize the random generator
    ot.RandomGenerator.SetSeed(seed)

    dim = model.getInputDimension()
    pstar = np.atleast_2d(pstar)
    nPoint = pstar.shape[0]

    stdev = [sd] * dim
    corr = ot.IdentityMatrix(dim)
    if nPoint > 1:
        # one Gaussian per design point, mixed with equal weights
        distribution_list = list()
        for point in pstar:
            distribution_list.append(ot.Normal(point, stdev, corr))
        instrumental_distribution = ot.Mixture(distribution_list)
    elif nPoint == 1:
        instrumental_distribution = ot.Normal(pstar[0], stdev, corr)

    # Run the importance sampling simulation in the standard space
    experiment = ot.ImportanceSamplingExperiment(instrumental_distribution)
    simulation = ot.ProbabilitySimulationAlgorithm(
        ot.StandardEvent(new_event), experiment)
    simulation.setMaximumOuterSampling(outerSampling)
    simulation.setBlockSize(blockSize)
    simulation.setMaximumCoefficientOfVariation(coefVar)
    simulation.run()

    result = simulation.getResult()

    # Summary table; pandas >= 2.0 removed DataFrame.append, so the frame is
    # built directly instead of appended row by row.
    dfResult = pd.DataFrame(
        [
            result.getProbabilityEstimate(),
            result.getCoefficientOfVariation(),
            result.getConfidenceLength(),
            result.getOuterSampling() * result.getBlockSize(),
        ],
        index=[
            "Probability of failure",
            "Coefficient of variation",
            "95 % Confidence length",
            "Number of calls",
        ],
    )
    dfResult = dfResult.reset_index()
    dfResult.columns = ["", "Results - Importance Sampling"]

    if verbose:
        print(dfResult, "\n")

    return simulation
# %% # We want to estimate the probability :math:`P_f` of the output variable to be greater than a prescribed threshold :math:`s=10` : this is the failure event. This probability is simply expressed as an integral : # # .. math:: # # P_f = \int_{\mathcal{D}} \mathbf{1}_{\mathcal{D}}(x) df_{X_1,X_2}(x) # # where :math:`\mathcal{D} = \{ (x_1, x_2) \in [0,+\infty[ \times \mathbb{R} / x_1 x_2 \geq s \}` is the failure domain. # In the general case the probability density function :math:`f_{X_1,X_2}` and the domain of integration :math:`\mathcal{D}` are difficult to handle. # %% # We first define RandomVector objects and the failure event associated to the ouput random variable. vectorX = ot.RandomVector(distX) vectorY = ot.CompositeRandomVector(f, vectorX) s = 10.0 event = ot.ThresholdEvent(vectorY, ot.Greater(), s) # %% # This event can easily be represented with a 1D curve as it is a branch of an hyperbole. # If :math:`y = x_1 x_2 = 10.0`, then the boundary of the domain of failure is the curve : # # .. math:: # # h : x_1 \mapsto \frac{10.0}{x_1} # # %% # We shall represent this curve using a :class:`~openturns.Contour` object. nx, ny = 15, 15 xx = ot.Box([nx], ot.Interval([0.0], [10.0])).generate() yy = ot.Box([ny], ot.Interval([-10.0], [10.0])).generate()
def run_MonteCarlo(
    event,
    coefVar=0.1,
    outerSampling=10000,
    blockSize=10,
    seed=1234,
    verbose=False,
    failure_domain=None,
):
    """
    Run a Monte Carlo simulation.

    Parameters
    ----------
    event : openturns.Event
        The failure event or a list of failure events.
    coefVar : float
        The target coefficient of variation.
    outerSampling : int
        The maximum number of outer iterations.
        Nb of iterations = outerSampling x blockSize.
    blockSize : int
        The number of samples sent to be evaluated simultaneously.
    seed : int
        Seed for the openturns random generator.
    verbose : bool
        Enable or not the display of the result.
    failure_domain : string
        Type of failure domain form : either 'union' or 'intersection'.
        Only needed if the event is a list.

    Returns
    -------
    simulation : openturns.ProbabilitySimulationAlgorithm
        The simulation algorithm after the run.
    """
    # Case with the limit state defined as an intersection or a union of
    # events: aggregate the sub-events into a single ThresholdEvent.
    if type(event) is list:
        n_event = len(event)
        antecedent = event[0].getAntecedent()

        def _indicators(X):
            """Evaluate each sub-event indicator (0/1 column) on sample X."""
            sample = ot.Sample(X.getSize(), n_event)
            for i in range(n_event):
                sample[:, i] = event[i].getFunction()(X)
            sample = np.array(sample)
            for i in range(n_event):
                op = event[i].getOperator().getImplementation().getClassName()
                if op in ("Less", "LessOrEqual"):
                    sample[:, i] = sample[:, i] < event[i].getThreshold()
                elif op in ("Greater", "GreaterOrEqual"):
                    sample[:, i] = sample[:, i] >= event[i].getThreshold()
            return sample

        if failure_domain == "union":
            # at least one sub-event occurs <=> sum of indicators > 0
            def aggregated(X):
                return np.atleast_2d(_indicators(X).sum(axis=1)).T
        elif failure_domain == "intersection":
            # all sub-events occur <=> product of indicators > 0
            def aggregated(X):
                return np.atleast_2d(_indicators(X).prod(axis=1)).T
        else:
            raise ValueError(
                "failure_domain must be 'union' or 'intersection' "
                "when event is a list, got %r" % (failure_domain,))

        model = ot.PythonFunction(
            event[0].getFunction().getInputDimension(),
            event[0].getFunction().getOutputDimension(),
            func_sample=aggregated,
        )
        output = ot.RandomVector(model, antecedent)
        # BUG FIX: the aggregated event must be bound to `new_event` for BOTH
        # forms; the union branch previously assigned `event`, leaving
        # `new_event` undefined and raising NameError below.
        new_event = ot.ThresholdEvent(output, ot.Greater(), 0.0)
    else:
        new_event = event

    # Initialize the random generator
    ot.RandomGenerator.SetSeed(seed)

    # Run Monte Carlo simulation
    experiment = ot.MonteCarloExperiment()
    simulation = ot.ProbabilitySimulationAlgorithm(new_event, experiment)
    simulation.setMaximumCoefficientOfVariation(coefVar)
    simulation.setMaximumOuterSampling(outerSampling)
    simulation.setBlockSize(blockSize)
    simulation.run()

    result = simulation.getResult()

    # Summary table; pandas >= 2.0 removed DataFrame.append, so the frame is
    # built directly instead of appended row by row.
    dfResult = pd.DataFrame(
        [
            result.getProbabilityEstimate(),
            result.getCoefficientOfVariation(),
            result.getConfidenceLength(),
            result.getOuterSampling() * result.getBlockSize(),
        ],
        index=[
            "Probability of failure",
            "Coefficient of variation",
            "95 % Confidence length",
            "Number of calls",
        ],
    )
    dfResult = dfResult.reset_index()
    dfResult.columns = ["", "Results - Monte Carlo"]

    if verbose:
        print(dfResult, "\n")

    return simulation
# Upstream river level: triangular distribution (legacy OpenTURNS API
# variant of the flood example: NumericalPoint / ot.Event / ot.MonteCarlo)
Zm_law = ot.Triangular(54., 55., 56.)
coll = ot.DistributionCollection([Q_law, Ks_law, Zv_law, Zm_law])
distribution = ot.ComposedDistribution(coll)

# median point of each marginal, used as a reference model evaluation
x = list(map(lambda dist: dist.computeQuantile(0.5)[0], coll))
fx = function(x)

# only the first threshold (k = 0.0) is actually run
for k in [0.0, 2.0, 5.0, 8.][0:1]:
    randomVector = ot.RandomVector(distribution)
    composite = ot.RandomVector(function, randomVector)

    print('--------------------')
    print('model flood S <', k, 'gamma=', end=' ')
    print('f(', ot.NumericalPoint(x), ')=', fx)

    event = ot.Event(composite, ot.Greater(), k)

    # only n = 1000 and gamma1 = 0.5 are actually run
    for n in [100, 1000, 5000][1:2]:
        for gamma1 in [0.25, 0.5, 0.75][1:2]:
            # reference probability by crude Monte Carlo
            algo = ot.MonteCarlo(event)
            algo.setMaximumOuterSampling(100 * n)
            # algo.setMaximumCoefficientOfVariation(-1.)
            algo.run()
            result = algo.getResult()
            print(result)

            # adaptive directional sampling on the same event
            algo = otads.AdaptiveDirectionalSampling(event)
            algo.setMaximumOuterSampling(n)
            algo.setGamma([gamma1, 1.0 - gamma1])
            # count the model evaluations consumed by the ADS run
            calls0 = function.getEvaluationCallsNumber()
            algo.run()
            calls = function.getEvaluationCallsNumber() - calls0
            result = algo.getResult()
# Stream out the result print('algo result=', myAlgo.getResult()) print('probability distribution=', myAlgo.getResult().getProbabilityDistribution()) print('-' * 32) ot.RandomGenerator.SetSeed(0) description = ot.Description() description.add('composite vector/comparison event') dim = 2 distribution = ot.Normal(dim) Xvector = ot.RandomVector(distribution) f = ot.SymbolicFunction(['x0', 'x1'], ['x0+x1']) Yvector = ot.CompositeRandomVector(f, Xvector) s = 1.0 event1 = ot.Event(Yvector, ot.Greater(), s) description.add('composite vector/domain event') domain1D = ot.LevelSet(ot.SymbolicFunction(['x0'], ['sin(x0)']), ot.LessOrEqual(), -0.5) event2 = ot.Event(Yvector, domain1D) description.add('composite vector/interval event') interval = ot.Interval(0.5, 1.5) event3 = ot.Event(Yvector, interval) description.add('process/domain event') Xprocess = ot.WhiteNoise(distribution, ot.RegularGrid(0.0, 0.1, 10)) domain2D = ot.LevelSet(ot.SymbolicFunction(['x0', 'x1'], ['(x0-1)^2+x1^2']), ot.LessOrEqual(), 1.0) event4 = ot.Event(Xprocess, domain2D) all_events = [event1, event2, event3, event4] for i, event in enumerate(all_events): print(description[i])
from __future__ import print_function
import openturns as ot

ot.TESTPREAMBLE()

model = ot.SymbolicFunction(['x0', 'x1', 'x2', 'x3'],
                            ['-(6+x0^2-x1+x2+3*x3)'])
dim = model.getInputDimension()
# identical normal marginals with a mixed (Clayton x Normal) copula
marginals = [ot.Normal(5.0, 3.0) for i in range(dim)]
distribution = ot.ComposedDistribution(
    marginals, ot.ComposedCopula([ot.ClaytonCopula(), ot.NormalCopula()]))
#distribution = ot.Normal([5]*dim, [3]*dim, ot.CorrelationMatrix(dim))
#distribution = ot.ComposedDistribution(marginals, ot.IndependentCopula(dim))
distribution.setDescription(['marginal' + str(i) for i in range(dim)])
vect = ot.RandomVector(distribution)
output = ot.CompositeRandomVector(model, vect)
# failure event {model(X) > 0} (legacy ot.Event API)
event = ot.Event(output, ot.Greater(), 0.0)
solver = ot.Cobyla()
solver.setMaximumEvaluationNumber(200)
solver.setMaximumAbsoluteError(1.0e-10)
solver.setMaximumRelativeError(1.0e-10)
solver.setMaximumResidualError(1.0e-10)
solver.setMaximumConstraintError(1.0e-10)
# FORM started from the input mean
algo = ot.FORM(solver, event, distribution.getMean())
algo.run()
result = algo.getResult()
# sensitivity of the Hasofer reliability index w.r.t. distribution parameters
hasoferReliabilityIndexSensitivity = result.getHasoferReliabilityIndexSensitivity(
)
print(hasoferReliabilityIndexSensitivity)
# The 3D mesher mesher3D = ot.LevelSetMesher([3] * 3) print("mesher3D=", mesher3D) function3D = ot.SymbolicFunction( ["x0", "x1", "x2"], ["cos(x0 * x1 + x2)/(1 + 0.1*(x0^2 + x1^2 + x2^2))"]) levelSet3D = ot.LevelSet(function3D, ot.LessOrEqual(), level) # Manual bounding box mesh3D = mesher3D.build(levelSet3D, ot.Interval([-10.0] * 3, [10.0] * 3)) print("mesh3D=", mesh3D) # Issue #1668 f = ot.SymbolicFunction(["x", "y"], ["x^2+y^2"]) levelset = ot.LevelSet(f, ot.Less(), 1.0) mesh = ot.LevelSetMesher([16] * 2).build( levelset, ot.Interval([-1.5] * 2, [1.5] * 2)) gLess = mesh.draw() f = ot.SymbolicFunction(["x", "y"], ["-(x^2+y^2)"]) levelset = ot.LevelSet(f, ot.Greater(), -1.0) mesh = ot.LevelSetMesher([16] * 2).build( levelset, ot.Interval([-1.5] * 2, [1.5] * 2)) gGreater = mesh.draw() ott.assert_almost_equal( gLess.getDrawable(0).getData(), gGreater.getDrawable(0).getData(), 1e-4, 1e-4) except: import sys print("t_LevelSetMesher_std.py", sys.exc_info()[0], sys.exc_info()[1])
# Create a FORMResult input2 = ot.Description(2) input2[0] = 'x' input2[1] = 'y' output2 = ot.Description(1) output2[0] = 'd' formula2 = ot.Description(1) formula2[0] = 'y^2-x' model = ot.Function(input2, output2, formula2) model.setName('sum') input3 = ot.RandomVector(ot.Normal(2)) input3.setName('input') output3 = ot.RandomVector(model, input3) output3.setName('output') event = ot.Event(output3, ot.Greater(), 1.0) event.setName('failureEvent') designPoint = ot.Point(2, 0.0) designPoint[0] = 1.0 formResult = ot.FORMResult(ot.Point(2, 1.0), event, False) formResult.setName('formResult') formResult.getImportanceFactors() formResult.getEventProbabilitySensitivity() myStudy.add('formResult', formResult) # Create a SORMResult sormResult = ot.SORMResult([1.0] * 2, event, False) sormResult.setName('sormResult') sormResult.getEventProbabilityBreitung() sormResult.getEventProbabilityHohenBichler() sormResult.getEventProbabilityTvedt()
import otrobopt

#ot.Log.Show(ot.Log.Info)

# Parametric objective J(x; theta) and constraint g(x; theta); the second
# input 'theta' is frozen at 0.5 here and randomized by the measures below.
calJ = ot.SymbolicFunction(['x', 'theta'], ['x^3 - 3*x + theta'])
calG = ot.SymbolicFunction(['x', 'theta'], ['-(x + theta - 2)'])
J = ot.ParametricFunction(calJ, [1], [0.5])
g = ot.ParametricFunction(calG, [1], [0.5])
dim = J.getInputDimension()

# Distribution of the uncertain parameter theta.
thetaDist = ot.Exponential(2.0)

# Robust problem: maximize the mean of J subject to a joint chance
# constraint on g with operator Greater() at level 0.9.
robustnessMeasure = otrobopt.MeanMeasure(J, thetaDist)
reliabilityMeasure = otrobopt.JointChanceMeasure(
    g, thetaDist, ot.Greater(), 0.9)
problem = otrobopt.RobustOptimizationProblem(
    robustnessMeasure, reliabilityMeasure)
problem.setMinimization(False)

# Inner deterministic solver used at each outer iteration.
solver = ot.Cobyla()
solver.setMaximumIterationNumber(1000)
solver.setStartingPoint([0.0] * dim)

algo = otrobopt.SequentialMonteCarloRobustAlgorithm(problem, solver)
algo.setMaximumIterationNumber(10)
algo.setMaximumAbsoluteError(1e-3)
algo.setInitialSamplingSize(10)
algo.run()
result = algo.getResult()
print('x*=', result.getOptimalPoint(),
      'J(x*)=', result.getOptimalValue(),
      'iteration=', result.getIterationNumber())
myStudy.add('taylorExpansionsMoments', taylorExpansionsMoments)

# Create a FORMResult
# Limit-state model d = y^2 - x over two standard normal inputs.
input2 = ot.Description(['x', 'y'])
formula2 = ot.Description(['y^2-x'])
model = ot.SymbolicFunction(input2, formula2)
model.setName('sum')
input3 = ot.RandomVector(ot.Normal(2))
input3.setName('input')
output3 = ot.CompositeRandomVector(model, input3)
output3.setName('output')
# Failure event: output exceeds 1.
event = ot.ThresholdEvent(output3, ot.Greater(), 1.0)
event.setName('failureEvent')
designPoint = ot.Point([1.0, 0.0])
# FORM result built directly from a given standard-space design point.
formResult = ot.FORMResult(ot.Point([1.0] * 2), event, False)
formResult.setName('formResult')
formResult.getImportanceFactors()
formResult.getEventProbabilitySensitivity()
myStudy.add('formResult', formResult)

# Create a SORMResult
sormResult = ot.SORMResult([1.0] * 2, event, False)
sormResult.setName('sormResult')
# Exercise the three second-order probability approximations.
sormResult.getEventProbabilityBreitung()
sormResult.getEventProbabilityHohenbichler()
sormResult.getEventProbabilityTvedt()
import openturns as ot
import persalys

myStudy = persalys.Study('myStudy')

# Physical model: Y0 = sin(X0) + 8*X1 with two Normal(1, 1) inputs.
X0 = persalys.Input('X0', ot.Normal(1, 1))
X1 = persalys.Input('X1', ot.Normal(1, 1))
Y0 = persalys.Output('Y0')
model = persalys.SymbolicPhysicalModel(
    'aModelPhys', [X0, X1], [Y0], ['sin(X0) + 8*X1'])
myStudy.add(model)

# Fully-specified limit state, then mutate its threshold and operator
# after registration and echo each state.
limitState = persalys.LimitState(
    'aLimitState', model, 'Y0', ot.Greater(), 20.0)
print(limitState)
myStudy.add(limitState)
limitState.setThreshold(10.0)
limitState.setOperator(ot.Less())
print(limitState)

# Limit state built with defaults, threshold set afterwards.
limitState2 = persalys.LimitState('aLimitState2', model)
print(limitState2)
myStudy.add(limitState2)
limitState2.setThreshold(15.0)
print(limitState2)
# %% # We use the input parameters distribution from the data class : distribution = cb.distribution distribution.setDescription(['E', 'F', 'L', 'I']) # %% # We define the model model = cb.model # %% # Create the event whose probability we want to estimate. # %% vect = ot.RandomVector(distribution) G = ot.CompositeRandomVector(model, vect) event = ot.ThresholdEvent(G, ot.Greater(), 0.3) event.setName("deviation") # %% # Define a solver optimAlgo = ot.Cobyla() optimAlgo.setMaximumEvaluationNumber(1000) optimAlgo.setMaximumAbsoluteError(1.0e-10) optimAlgo.setMaximumRelativeError(1.0e-10) optimAlgo.setMaximumResidualError(1.0e-10) optimAlgo.setMaximumConstraintError(1.0e-10) # %% # Run FORM algo = ot.FORM(optimAlgo, event, distribution.getMean()) algo.run()
dim = 2
distribution = ot.Normal(dim)
X = ot.RandomVector(distribution)

# 1. Composite/Composite
# Two scalar projections of the same bivariate normal input.
f1 = ot.SymbolicFunction(['x' + str(i) for i in range(dim)], ['x0'])
f2 = ot.SymbolicFunction(['x' + str(i) for i in range(dim)], ['x1'])
Y1 = ot.CompositeRandomVector(f1, X)
Y2 = ot.CompositeRandomVector(f2, X)
# ThresholdEvent replaces the deprecated ot.Event constructor, consistent
# with the other event tests in this file.
e1 = ot.ThresholdEvent(Y1, ot.Less(), 0.0)
e2 = ot.ThresholdEvent(Y2, ot.Greater(), 0.0)
e3 = e1.intersect(e2)
#print('e3=', e3)

# sampling test
algo = ot.ProbabilitySimulationAlgorithm(e3)
algo.setMaximumOuterSampling(250)
algo.setBlockSize(4)
# Negative target CV deliberately disables that stopping criterion, so the
# run always consumes the full maximum outer sampling.
algo.setMaximumCoefficientOfVariation(-0.1)
algo.run()
print("proba_e3 = %.3g" % algo.getResult().getProbabilityEstimate())

e4 = e1.join(e2)
#print('e4=', e4)