Example #1
def create_monte_carlo(model, inputRandomVector, coefficient_variation):
    """Create a Monte Carlo algorithm.

    Parameters
    ----------
    model : openturns.Function
        The model.
    inputRandomVector : openturns.RandomVector
        The vector of random inputs.
    coefficient_variation : float
        Target for the coefficient of variation of the estimator.

    """

    outputVariableOfInterest = ot.CompositeRandomVector(model, inputRandomVector)
    # Create an Event from this RandomVector
    threshold = 30
    myEvent = ot.ThresholdEvent(outputVariableOfInterest, ot.Greater(), threshold)
    myEvent.setName("Deviation > %g cm" % threshold)

    # Create a Monte Carlo algorithm
    experiment = ot.MonteCarloExperiment()
    myAlgoMonteCarlo = ot.ProbabilitySimulationAlgorithm(myEvent, experiment)
    myAlgoMonteCarlo.setBlockSize(100)
    myAlgoMonteCarlo.setMaximumCoefficientOfVariation(coefficient_variation)

    return myAlgoMonteCarlo
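
# A minimal usage sketch (not from the original source): a hypothetical beam
# deviation model with independent normal inputs, only to show how the
# returned algorithm is typically driven. Model, distributions and settings
# are illustrative assumptions.
import openturns as ot

model = ot.SymbolicFunction(["E", "F"], ["F / E * 1.0e5"])
inputDistribution = ot.ComposedDistribution(
    [ot.Normal(3.0e4, 4.5e3), ot.Normal(10.0, 1.0)])
inputRandomVector = ot.RandomVector(inputDistribution)

algo = create_monte_carlo(model, inputRandomVector, 0.1)
algo.setMaximumOuterSampling(1000)
algo.run()
print(algo.getResult().getProbabilityEstimate())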
Example #2
    def test_UseCaseMonteCarlo(self):
        problem = otb.ReliabilityProblem14()
        event = problem.getEvent()

        # Create a Monte Carlo algorithm
        experiment = ot.MonteCarloExperiment()
        algo = ot.ProbabilitySimulationAlgorithm(event, experiment)
        algo.setMaximumCoefficientOfVariation(0.01)
        algo.setBlockSize(int(1.0e3))
        algo.setMaximumOuterSampling(int(1e3))
        algo.run()
        # Retrieve results
        result = algo.getResult()
        computed_pf = result.getProbabilityEstimate()
        exact_pf = problem.getProbability()
        print("exact_pf=", exact_pf)
        print("computed_pf=", computed_pf)
        samplesize = result.getOuterSampling() * result.getBlockSize()
        alpha = 0.05
        pflen = result.getConfidenceLength(1 - alpha)
        print(
            "%.2f%% confidence interval = [%f,%f]"
            % ((1 - alpha) * 100, computed_pf - pflen / 2, computed_pf + pflen / 2)
        )
        print("Sample size : ", samplesize)
        atol = 1.0e2 / np.sqrt(samplesize)
        np.testing.assert_allclose(computed_pf, exact_pf, atol=atol)
Example #3
def computeCrossingProbability_MonteCarlo(b, t, mu_S, covariance, R, delta_t,
                                          n_block, n_iter, CoV):
    X, event = getXEvent(b, t, mu_S, covariance, R, delta_t)
    algo = ot.ProbabilitySimulationAlgorithm(event, ot.MonteCarloExperiment())
    algo.setBlockSize(n_block)
    algo.setMaximumOuterSampling(n_iter)
    algo.setMaximumCoefficientOfVariation(CoV)
    algo.run()
    return algo.getResult().getProbabilityEstimate() / delta_t
Example #4
def sim_event(ev):
    experiment = ot.MonteCarloExperiment()
    algo = ot.ProbabilitySimulationAlgorithm(ev, experiment)
    algo.setMaximumOuterSampling(2500)
    algo.setBlockSize(4)
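    # a negative target disables the coefficient of variation stopping
    # criterion, so the run stops on the maximum outer sampling only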
    algo.setMaximumCoefficientOfVariation(-1.0)
    algo.run()
    result = algo.getResult()
    return result.getProbabilityEstimate()
Example #5
def build_Xy_table(ABM,
                   size_sample,
                   size_MC,
                   SequenceFunction,
                   SequenceFunctionString,
                   as_df=True,
                   to_file=True,
                   **kwargs):

    import os

    problem_ABM = ABM.problem()

    if SequenceFunction == ot.MonteCarloExperiment:
        unscaled_set_X = np.array([
            ot.MonteCarloExperiment(ot.Uniform(0, 1), size_sample).generate()
            for i in range(problem_ABM['num_vars'])
        ])
        # reshape to (num_vars, size_sample), then transpose so that each
        # column corresponds to one input variable
        unscaled_set_X = unscaled_set_X.reshape(problem_ABM['num_vars'],
                                                size_sample).T

    else:
        SequenceFunction = SequenceFunction(problem_ABM['num_vars'], **kwargs)
        unscaled_set_X = np.array(SequenceFunction.generate(size_sample))

    set_X = rescale_sample(unscaled_set_X, problem_ABM['bounds'])

    # Evaluate
    set_y = [
        np.array(Parallel(n_jobs=-1)(delayed(ABM.model)(p) for p in set_X))
        for i in range(size_MC)
    ]

    # as DataFrame
    df = pd.DataFrame(set_X, columns=problem_ABM['names']).join(
        pd.DataFrame(set_y).T.add_prefix('evaluation_'))

    if to_file:
        # save file
        directory = 'ABM_eval_' + problem_ABM['abm_name']
        filename = SequenceFunctionString + '_ss' + str(
            size_sample) + '_MC' + str(size_MC)
        if not os.path.exists(directory):
            os.makedirs(directory)
        df.to_csv(directory + '/' + filename, index=False)
        print('Saved file ' + filename)
    if as_df:
        return df
Example #6
    def buildMonteCarlo(self, problem):
        """
        Creates a Monte-Carlo algorithm.

        We create a MonteCarloExperiment and a ProbabilitySimulationAlgorithm
        based on the event of the problem.

        Parameters
        ----------
        problem : ot.ReliabilityBenchmarkProblem
            The problem.

        Returns
        -------
        algo : ot.ProbabilitySimulationAlgorithm
            The Monte-Carlo algorithm for estimating the probability.
        """
        myEvent = problem.getEvent()
        experiment = ot.MonteCarloExperiment()
        algo = ot.ProbabilitySimulationAlgorithm(myEvent, experiment)
        return algo
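
# Hedged usage sketch (not part of the original snippet). Assumptions: `otb`
# is otbenchmark and `benchmark` is an instance of the enclosing class. The
# returned algorithm has no stopping criterion set, so the caller configures
# one before running, as in the test cases below.
problem = otb.ReliabilityProblem14()
algo = benchmark.buildMonteCarlo(problem)
algo.setMaximumCoefficientOfVariation(0.01)
algo.setMaximumOuterSampling(1000)
algo.run()
print(algo.getResult().getProbabilityEstimate())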
Example #7
    def test_UseCase(self):
        problem = otb.ReliabilityProblem60()
        event = problem.getEvent()

        # Create a Monte Carlo algorithm
        experiment = ot.MonteCarloExperiment()
        algo = ot.ProbabilitySimulationAlgorithm(event, experiment)
        algo.setMaximumCoefficientOfVariation(0.05)
        algo.setMaximumOuterSampling(int(1e5))
        algo.run()

        # Retrieve results
        result = algo.getResult()
        computed_pf = result.getProbabilityEstimate()
        exact_pf = problem.getProbability()
        print("exact_pf=", exact_pf)
        print("computed_pf=", computed_pf)
        samplesize = result.getOuterSampling() * result.getBlockSize()
        print("Sample size : ", samplesize)
        atol = 1.0 / np.sqrt(samplesize)
        np.testing.assert_allclose(computed_pf, exact_pf, atol=atol)
Example #8
    print("asymptotic intervals:")
    print("Aggregated first order indices interval = ", interval_fo_asymptotic)
    print("Aggregated total order indices interval = ", interval_to_asymptotic)

# special case in dim=2
ot.ResourceMap.SetAsString('SobolIndicesExperiment-SamplingMethod',
                           'MonteCarlo')
ot.RandomGenerator.SetSeed(0)
distribution = ot.ComposedDistribution([ot.Uniform()] * 2)
size = 1000
model = ot.SymbolicFunction(['X1', 'X2'], ['2*X1 + X2'])
sensitivity_algorithm = ot.SaltelliSensitivityAlgorithm(
    distribution, size, model, True)
print(sensitivity_algorithm.getSecondOrderIndices())
ot.RandomGenerator.SetSeed(0)
experiment = ot.MonteCarloExperiment(distribution, size)
sensitivity_algorithm = ot.SaltelliSensitivityAlgorithm(
    experiment, model, True)
print(sensitivity_algorithm.getSecondOrderIndices())
ot.RandomGenerator.SetSeed(0)
x = ot.SobolIndicesExperiment(distribution, size, True).generate()
y = model(x)
sensitivity_algorithm = ot.SaltelliSensitivityAlgorithm(x, y, size)
print(sensitivity_algorithm.getSecondOrderIndices())

# null contribution case: X3 not in output formula
model = ot.SymbolicFunction(['X1', 'X2', 'X3'], ['10+3*X1+X2'])
distribution = ot.ComposedDistribution([ot.Uniform(-1.0, 1.0)] *
                                       input_dimension)
size = 10000
for method in methods:
Example #9
    ot.Interval([low] * 2, [up] * 2, [False, True], [True, False]),
    ot.Interval([low] * 2, [up] * 2, [False, True], [False, True]),
    ot.Interval([low] * 2, [up] * 2, [False, True], [False, False]),
    ot.Interval([low] * 2, [up] * 2, [False, False], [True, True]),
    ot.Interval([low] * 2, [up] * 2, [False, False], [True, False]),
    ot.Interval([low] * 2, [up] * 2, [False, False], [False, True]),
    ot.Interval([low] * 2, [up] * 2, [False, False], [False, False])
]

for domain in intervals:
    print('#' * 50)
    print('domain=\n', domain)
    outDim = domain.getDimension()
    f = ot.SymbolicFunction(inVars, inVars[0:outDim])
    Y = ot.CompositeRandomVector(f, X)
    event = ot.ThresholdEvent(Y, domain)

    ot.RandomGenerator.SetSeed(0)
    # algo = getattr(openturns, algoName)(event)
    algo = ot.ProbabilitySimulationAlgorithm(event, ot.MonteCarloExperiment())
    algo.run()
    res = algo.getResult().getProbabilityEstimate()
    print('MC p=%.6g' % res)

    ot.RandomGenerator.SetSeed(0)
    # algo = getattr(openturns, algoName)(event)
    algo = ot.FORM(ot.Cobyla(), event, X.getMean())
    algo.run()
    res = algo.getResult().getEventProbability()
    print('FORM p=%.2f' % res)
Example #10
import openturns as ot
from openturns.viewer import View

# MonteCarlo
d = ot.MonteCarloExperiment(ot.ComposedDistribution([ot.Uniform()] * 3), 32)
s = d.generate()
s.setDescription(["X1", "X2", "X3"])
g = ot.Graph()
g.setTitle("MonteCarlo experiment")
g.setGridColor("black")
p = ot.Pairs(s)
g.add(p)
View(g)
Example #11
mostSignificant = 50
significanceFactor = 1.0e-4
truncatureBasisStrategy_2 = ot.CleaningStrategy(
    multivariateBasis, maximumConsideredTerms, mostSignificant, significanceFactor, True)

# %%
# STEP 3: Evaluation strategy of the approximation coefficients
# -------------------------------------------------------------

# %%
# The technique illustrated is least squares, where the points come from a design of experiments. Here: the Monte Carlo sampling technique.

# %%
sampleSize = 100
evaluationCoeffStrategy = ot.LeastSquaresStrategy(
    ot.MonteCarloExperiment(sampleSize))

# %%
# You can specify the approximation algorithm. This is the algorithm that generates a sequence of bases using least angle regression (LARS).

# %%
basisSequenceFactory = ot.LARS()

# %%
# This algorithm estimates the empirical error on each sub-basis using a leave-one-out strategy.

# %%
fittingAlgorithm = ot.CorrectedLeaveOneOut()
# Finally, the metamodel selection algorithm embedded in the LeastSquaresStrategy
approximationAlgorithm = ot.LeastSquaresMetaModelSelectionFactory(
    basisSequenceFactory, fittingAlgorithm)
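
# %%
# Hedged sketch (not in the original snippet): how these pieces are typically
# assembled, assuming `model` and `distribution` are defined earlier in the
# source. The selection algorithm is passed to the least squares strategy
# together with the Monte Carlo design.
projectionStrategy = ot.LeastSquaresStrategy(
    ot.MonteCarloExperiment(sampleSize), approximationAlgorithm)
algo = ot.FunctionalChaosAlgorithm(
    model, distribution, truncatureBasisStrategy_2, projectionStrategy)
algo.run()
metamodel = algo.getResult().getMetaModel()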
Example #12
mean[3] = 5.0
sigma = [1.0] * dim
R = ot.IdentityMatrix(dim)
myDistribution = ot.Normal(mean, sigma, R)

# We create a 'usual' RandomVector from the Distribution
vect = ot.RandomVector(myDistribution)

# We create a composite random vector
output = ot.CompositeRandomVector(myFunction, vect)

# We create an Event from this RandomVector
myEvent = ot.ThresholdEvent(output, ot.Less(), -3.0)

# Monte Carlo
experiments = [ot.MonteCarloExperiment()]
# Quasi Monte Carlo
experiments.append(ot.LowDiscrepancyExperiment())
# Randomized Quasi Monte Carlo
experiment = ot.LowDiscrepancyExperiment()
experiment.setRandomize(True)
experiments.append(experiment)
# Importance sampling
mean[0] = 4.99689645939288809018e+01
mean[1] = 1.84194175946153282375e+00
mean[2] = 1.04454036676956398821e+01
mean[3] = 4.66776215562709406726e+00
myImportance = ot.Normal(mean, sigma, R)
experiments.append(ot.ImportanceSamplingExperiment(myImportance))
# Randomized LHS
experiment = ot.LHSExperiment()
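
# Hedged sketch (the original snippet stops here): how such a list of
# experiments is typically consumed, running the same event with each design
# and comparing the estimates. The LHS completion and the sampling sizes are
# illustrative assumptions.
experiment.setAlwaysShuffle(True)
experiments.append(experiment)
for experiment in experiments:
    ot.RandomGenerator.SetSeed(0)
    algo = ot.ProbabilitySimulationAlgorithm(myEvent, experiment)
    algo.setMaximumOuterSampling(250)
    algo.setBlockSize(4)
    algo.setMaximumCoefficientOfVariation(0.1)
    algo.run()
    print(experiment.getClassName(), algo.getResult().getProbabilityEstimate())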
Example #13
def run_MonteCarlo(
    event,
    coefVar=0.1,
    outerSampling=10000,
    blockSize=10,
    seed=1234,
    verbose=False,
    failure_domain=None,
):
    """
    Run a Monte Carlo simulation.

    Parameters
    ----------
    event : openturns.Event
        The failure event, or a list of failure events.
    coefVar : float
        The target coefficient of variation.
    outerSampling : int
        The maximum number of outer iterations.
        Total number of evaluations = outerSampling x blockSize.
    blockSize : int
        The number of samples evaluated simultaneously.
    seed : int
        Seed for the openturns random generator.
    verbose : bool
        Enable or not the display of the result.
    failure_domain : string
        Type of failure domain: either 'union' or 'intersection'. Only
        needed if the event is a list.
    """

    # case with the limit state defined as an intersection or a
    # union of the event
    if type(event) is list:
        n_event = len(event)
        antecedent = event[0].getAntecedent()

        if failure_domain == "union":

            def function_union(X):
                sample = ot.Sample(X.getSize(), n_event)
                for i in range(n_event):
                    sample[:, i] = event[i].getFunction()(X)

                sample = np.array(sample)
                for i in range(n_event):
                    op = event[i].getOperator().getImplementation().getClassName()
                    if op in ("Less", "LessOrEqual"):
                        sample[:, i] = sample[:, i] < event[i].getThreshold()
                    if op in ("Greater", "GreaterOrEqual"):
                        sample[:, i] = sample[:, i] >= event[i].getThreshold()
                return np.atleast_2d(sample.sum(axis=1)).T

            model = ot.PythonFunction(
                event[0].getFunction().getInputDimension(),
                event[0].getFunction().getOutputDimension(),
                func_sample=function_union,
            )
            output = ot.CompositeRandomVector(model, antecedent)
            new_event = ot.ThresholdEvent(output, ot.Greater(), 0.0)

        elif failure_domain == "intersection":

            def function_intersection(X):
                sample = ot.Sample(X.getSize(), n_event)
                for i in range(n_event):
                    sample[:, i] = event[i].getFunction()(X)

                sample = np.array(sample)
                for i in range(n_event):
                    op = event[i].getOperator().getImplementation().getClassName()
                    if op in ("Less", "LessOrEqual"):
                        sample[:, i] = sample[:, i] < event[i].getThreshold()
                    if op in ("Greater", "GreaterOrEqual"):
                        sample[:, i] = sample[:, i] >= event[i].getThreshold()
                return np.atleast_2d(sample.prod(axis=1)).T

            model = ot.PythonFunction(
                event[0].getFunction().getInputDimension(),
                event[0].getFunction().getOutputDimension(),
                func_sample=function_intersection,
            )
            output = ot.CompositeRandomVector(model, antecedent)
            new_event = ot.ThresholdEvent(output, ot.Greater(), 0.0)
    else:
        model = event.getFunction()
        new_event = event

    # Initialize the random generator
    ot.RandomGenerator.SetSeed(seed)

    # Run Monte Carlo simulation
    experiment = ot.MonteCarloExperiment()
    simulation = ot.ProbabilitySimulationAlgorithm(new_event, experiment)
    simulation.setMaximumCoefficientOfVariation(coefVar)
    simulation.setMaximumOuterSampling(outerSampling)
    simulation.setBlockSize(blockSize)

    # try:
    simulation.run()
    # except Exception as e:
    #     dump_cache(model, 'Cache/physicalModelMathFunction')
    #     raise e

    result = simulation.getResult()

    dfResult = pd.DataFrame(
        [
            result.getProbabilityEstimate(),
            result.getCoefficientOfVariation(),
            result.getConfidenceLength(),
            result.getOuterSampling() * result.getBlockSize(),
        ],
        index=[
            "Probability of failure",
            "Coefficient of variation",
            "95 % Confidence length",
            "Number of calls",
        ],
    )
    dfResult = dfResult.reset_index()
    dfResult.columns = ["", "Results - Monte Carlo"]

    if verbose:
        print(dfResult, "\n")

    return simulation
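
# Hedged usage sketch (not from the source): estimate the probability that the
# sum of two standard normal variables exceeds 4 with run_MonteCarlo. The
# model, threshold and settings are illustrative assumptions.
g = ot.SymbolicFunction(["x1", "x2"], ["x1 + x2"])
X = ot.RandomVector(ot.Normal(2))
Y = ot.CompositeRandomVector(g, X)
event = ot.ThresholdEvent(Y, ot.Greater(), 4.0)
simulation = run_MonteCarlo(event, coefVar=0.05, outerSampling=5000,
                            blockSize=100, verbose=True)
print(simulation.getResult().getProbabilityEstimate())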
Example #14
field = ot.Field(mesh, values)
evaluation = ot.P1LagrangeEvaluation(field)
x = [2.3]
y = evaluation(x)
print(y)
ott.assert_almost_equal(y, [0.55])

# Learning sample on meshD
mesher = ot.IntervalMesher([7, 7])
lowerBound = [-1.0, -1.0]
upperBound = [1.0, 1.0]
interval = ot.Interval(lowerBound, upperBound)
meshD = mesher.build(interval)
sample = ot.ProcessSample(meshD, 10, 1)
field = ot.Field(meshD, 1)
for k in range(sample.getSize()):
    field.setValues(ot.Normal().getSample(64))
    sample[k] = field
lagrange = ot.P1LagrangeEvaluation(sample)
# New mesh
mesh = ot.Mesh(
    ot.MonteCarloExperiment(ot.ComposedDistribution([ot.Uniform(-1, 1)] * 2),
                            200).generate())

point = mesh.getVertices()[0]
y = lagrange(point)
print(y)
index = lagrange.getEnclosingSimplexAlgorithm().query(point)
print(index)
assert index == 12, "wrong index"
Example #15
def myMonteCarloExperiment(distribution, size, model):
    experiment = ot.MonteCarloExperiment(distribution, size)
    sensitivity_algorithm = ot.SaltelliSensitivityAlgorithm(experiment, model)
    return sensitivity_algorithm
Example #16
S = ot.CorrelationMatrix(2)
S[1, 0] = 0.3
R = ot.NormalCopula.GetCorrelationFromSpearmanCorrelation(S)
copula = ot.NormalCopula(R)
distribution_corr = ot.ComposedDistribution([ot.Normal()] * 2, copula)

# %%
# ANCOVA needs a functional decomposition of the model
enumerateFunction = ot.LinearEnumerateFunction(2)
productBasis = ot.OrthogonalProductPolynomialFactory([ot.HermiteFactory()] * 2,
                                                     enumerateFunction)
adaptiveStrategy = ot.FixedStrategy(
    productBasis, enumerateFunction.getStrataCumulatedCardinal(4))
samplingSize = 250
projectionStrategy = ot.LeastSquaresStrategy(
    ot.MonteCarloExperiment(samplingSize))
algo = ot.FunctionalChaosAlgorithm(model, distribution, adaptiveStrategy,
                                   projectionStrategy)
algo.run()
result = ot.FunctionalChaosResult(algo.getResult())

# %%
# Create the input sample taking into account the correlation
size = 2000
sample = distribution_corr.getSample(size)

# %%
# Perform the decomposition
ancova = ot.ANCOVA(result, sample)
# Compute the ANCOVA indices (first order and uncorrelated indices are computed together)
indices = ancova.getIndices()
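
# %%
# Hedged follow-up (not in the original snippet): the uncorrelated parts of
# the indices can be retrieved separately; subtracting them from the full
# indices isolates the contribution of the correlation between X1 and X2.
uncorrelatedIndices = ancova.getUncorrelatedIndices()
correlatedParts = indices - uncorrelatedIndices
print("ANCOVA indices:", indices)
print("Uncorrelated parts:", uncorrelatedIndices)
print("Correlated parts:", correlatedParts)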
Example #17
# %%
# Run FORM
myAlgo = ot.FORM(myCobyla, myEvent, mean)
myAlgo.run()
result = myAlgo.getResult()
print('event probability:', result.getEventProbability())
print('calls number:', myFunction.getCallsNumber())

# %%
# **Stop a simulation algorithm using a time limit**
#
# Here we create a callback so that the simulation does not exceed a specified duration.

# %%
# Create simulation
experiment = ot.MonteCarloExperiment()
myAlgo = ot.ProbabilitySimulationAlgorithm(myEvent, experiment)
myAlgo.setMaximumOuterSampling(1000000)
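# a negative target disables the coefficient of variation stopping criterion,
# so only the time-limit callback (or the outer sampling limit) stops the run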
myAlgo.setMaximumCoefficientOfVariation(-1.0)

# %%
# Define the stopping criterion
timer = ot.TimerCallback(0.01)
myAlgo.setStopCallback(timer)

# %%
# Run algorithm
myAlgo.run()
result = myAlgo.getResult()
print('event probability:', result.getProbabilityEstimate())
print('calls number:', myFunction.getCallsNumber())
Example #18
# %%
# Create the correlated input distribution
S = ot.CorrelationMatrix(2)
S[1, 0] = 0.3
R = ot.NormalCopula.GetCorrelationFromSpearmanCorrelation(S)
copula = ot.NormalCopula(R)
distribution_corr = ot.ComposedDistribution([ot.Normal()] * 2, copula)

# %%
# ANCOVA needs a functional decomposition of the model
enumerateFunction = ot.LinearEnumerateFunction(2)
productBasis = ot.OrthogonalProductPolynomialFactory([ot.HermiteFactory()]*2, enumerateFunction)
adaptiveStrategy = ot.FixedStrategy(productBasis, enumerateFunction.getStrataCumulatedCardinal(4))
samplingSize = 250
projectionStrategy = ot.LeastSquaresStrategy(ot.MonteCarloExperiment(samplingSize))
algo = ot.FunctionalChaosAlgorithm(model, distribution, adaptiveStrategy, projectionStrategy)
algo.run()
result = ot.FunctionalChaosResult(algo.getResult())

# %%
# Create the input sample taking into account the correlation
size = 2000
sample = distribution_corr.getSample(size)

# %%
# Perform the decomposition
ancova = ot.ANCOVA(result, sample)
# Compute the ANCOVA indices (first order and uncorrelated indices are computed together)
indices = ancova.getIndices()
# Retrieve uncorrelated indices