Example #1
    # Requires module-level imports: openturns as ot, otbenchmark as otb,
    # numpy as np.
    def test_UseCaseFORM(self):
        problem = otb.ReliabilityProblem14()
        event = problem.getEvent()
        distribution = event.getAntecedent().getDistribution()
        # Create the nearest-point (design point) search algorithm
        myCobyla = ot.Cobyla()
        # Resolution options:
        eps = 1e-3
        myCobyla.setMaximumEvaluationNumber(100)
        myCobyla.setMaximumAbsoluteError(eps)
        myCobyla.setMaximumRelativeError(eps)
        myCobyla.setMaximumResidualError(eps)
        myCobyla.setMaximumConstraintError(eps)
        # Run FORM, starting from the mean of the input distribution
        algo = ot.FORM(myCobyla, event, distribution.getMean())
        algo.run()
        resultFORM = algo.getResult()
        # Combine with importance sampling: center a standard Gaussian
        # on the design point in the standard space
        standardSpaceDesignPoint = resultFORM.getStandardSpaceDesignPoint()
        dimension = distribution.getDimension()
        myImportance = ot.Normal(dimension)
        myImportance.setMean(standardSpaceDesignPoint)
        experiment = ot.ImportanceSamplingExperiment(myImportance)
        standardEvent = ot.StandardEvent(event)
        algo = ot.ProbabilitySimulationAlgorithm(standardEvent, experiment)
        algo.setMaximumCoefficientOfVariation(0.01)
        algo.setBlockSize(int(1.0e3))
        algo.setMaximumOuterSampling(int(1e3))
        algo.run()
        result = algo.getResult()
        computed_pf = result.getProbabilityEstimate()
        exact_pf = problem.getProbability()
        print("exact_pf=", exact_pf)
        print("computed_pf=", computed_pf)
        samplesize = result.getOuterSampling() * result.getBlockSize()
        alpha = 0.05
        pflen = result.getConfidenceLength(1 - alpha)
        print(
            "%.2f%% confidence interval = [%f,%f]"
            % ((1 - alpha) * 100, computed_pf - pflen / 2, computed_pf + pflen / 2)
        )
        print("Sample size : ", samplesize)
        # Absolute tolerance scaled with the Monte Carlo convergence rate
        atol = 1.0e1 / np.sqrt(samplesize)
        np.testing.assert_allclose(computed_pf, exact_pf, atol=atol)
    def buildFORMIS(self, problem, nearestPointAlgorithm):
        """
        Creates a FORM-IS algorithm.

        We first create a FORM object based on the given nearest-point
        algorithm and run it to get the design point in the standard space.
        Then we create an ImportanceSamplingExperiment based on the Gaussian
        distribution, centered on the design point.
        Finally, we create a ProbabilitySimulationAlgorithm.

        Parameters
        ----------
        problem : ot.ReliabilityBenchmarkProblem
            The problem.
        nearestPointAlgorithm : ot.OptimizationAlgorithm
            Optimization algorithm used to search the design point.

        Returns
        -------
        algo : ot.ProbabilitySimulationAlgorithm
            The FORM-IS algorithm for estimating the probability.
        """
        event = problem.getEvent()
        inputVector = event.getAntecedent()
        myDistribution = inputVector.getDistribution()
        physicalStartingPoint = myDistribution.getMean()
        algoFORM = ot.FORM(nearestPointAlgorithm, event, physicalStartingPoint)
        algoFORM.run()
        resultFORM = algoFORM.getResult()
        standardSpaceDesignPoint = resultFORM.getStandardSpaceDesignPoint()
        d = myDistribution.getDimension()
        myImportance = ot.Normal(d)
        myImportance.setMean(standardSpaceDesignPoint)
        experiment = ot.ImportanceSamplingExperiment(myImportance)
        standardEvent = ot.StandardEvent(event)
        algo = ot.ProbabilitySimulationAlgorithm(standardEvent, experiment)
        return algo
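
A minimal usage sketch for buildFORMIS (hedged: ReliabilityProblem14 comes from the otbenchmark package imported as otb, and `factory` stands for an instance of the class defining this method; these names are illustrative, not part of the original snippet):

# Hedged usage sketch; `factory` and the otb/ot imports are assumptions.
problem = otb.ReliabilityProblem14()
algo = factory.buildFORMIS(problem, ot.AbdoRackwitz())
algo.setMaximumOuterSampling(1000)
algo.setBlockSize(100)
algo.setMaximumCoefficientOfVariation(0.01)
algo.run()
print("pf =", algo.getResult().getProbabilityEstimate())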
# %%
# The key point is to define the importance distribution in the U-space. To
# define it, we use a multivariate standard Gaussian and configure it so that
# the center is equal to the design point in the U-space.

# %%
dimension = myDistribution.getDimension()
dimension

# %%
myImportance = ot.Normal(dimension)
myImportance.setMean(standardSpaceDesignPoint)
myImportance

# %%
# Create the design of experiment corresponding to importance sampling. This generates a `WeightedExperiment` with weights corresponding to the importance distribution.

# %%
experiment = ot.ImportanceSamplingExperiment(myImportance)

# %%
# Create the standard event corresponding to the event. This transforms the original problem into the U-space, with Gaussian independent marginals.

# %%
standardEvent = ot.StandardEvent(myEvent)

# %%
# We then create the simulation algorithm.

# %%
algo = ot.ProbabilitySimulationAlgorithm(standardEvent, experiment)
algo.setMaximumCoefficientOfVariation(cv)
algo.setMaximumOuterSampling(40000)
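
# %%
# A sketch of the usual next step (not part of the original snippet): run the
# algorithm and read off the probability estimate. It assumes `cv` was set
# earlier in the script.

# %%
algo.run()
result = algo.getResult()
print("Probability estimate:", result.getProbabilityEstimate())
print("Coefficient of variation:", result.getCoefficientOfVariation())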
Example #4
print("SORM probability estimate (Breitung): %s" %
      SORM_result.getEventProbabilityBreitung())
print("Number of evaluations of the limit-state function: %s" %
      g.getInputHistory().getSize())

# # *Most-probable-failure-point*-based importance sampling

# In[32]:

g.clearHistory()

# In[33]:

instrumental_distribution = ot.Normal(
    FORM_result.getStandardSpaceDesignPoint(),
    ot.CovarianceMatrix(X_distribution.getDimension()))
IS_experiment = ot.ImportanceSamplingExperiment(instrumental_distribution)
IS_algorithm = ot.ProbabilitySimulationAlgorithm(ot.StandardEvent(event),
                                                 IS_experiment)
IS_algorithm.setMaximumOuterSampling(40000)
IS_algorithm.setBlockSize(1)
IS_algorithm.setMaximumCoefficientOfVariation(.1)
IS_algorithm.run()
IS_result = IS_algorithm.getResult()

# In[34]:

print("Probability estimate: %.3e" % IS_result.getProbabilityEstimate())
print("Coefficient of variation: %.2f" % IS_result.getCoefficientOfVariation())
print("Number of evaluations: %d" % g.getInputHistory().getSize())

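# The following cell is a sketch (not from the original notebook): it derives
# a 95% confidence interval from the confidence length of the IS estimate,
# using only the IS_result object computed above.

# In[ ]:

alpha = 0.05
pf = IS_result.getProbabilityEstimate()
length = IS_result.getConfidenceLength(1 - alpha)
print("%.0f%% confidence interval: [%.3e, %.3e]"
      % ((1 - alpha) * 100, pf - length / 2, pf + length / 2))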
Example #5
# Requires: import openturns as ot. The variables myEvent, mean, sigma and R
# are defined earlier in the original script (a 4-dimensional input model).
# Monte Carlo
experiments = [ot.MonteCarloExperiment()]
# Quasi Monte Carlo
experiments.append(ot.LowDiscrepancyExperiment())
# Randomized Quasi Monte Carlo
experiment = ot.LowDiscrepancyExperiment()
experiment.setRandomize(True)
experiments.append(experiment)
# Importance sampling
mean[0] = 4.99689645939288809018e+01
mean[1] = 1.84194175946153282375e+00
mean[2] = 1.04454036676956398821e+01
mean[3] = 4.66776215562709406726e+00
myImportance = ot.Normal(mean, sigma, R)
experiments.append(ot.ImportanceSamplingExperiment(myImportance))
# Randomized LHS
experiment = ot.LHSExperiment()
experiment.setAlwaysShuffle(True)
experiments.append(experiment)

for experiment in experiments:

    ot.RandomGenerator.SetSeed(0)

    myAlgo = ot.ProbabilitySimulationAlgorithm(myEvent, experiment)
    myAlgo.setMaximumOuterSampling(250)
    myAlgo.setBlockSize(4)
    myAlgo.setMaximumCoefficientOfVariation(0.1)

    print('algo=', myAlgo)
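
A hedged extension of the loop above (a sketch; myEvent and the input model are assumed to be defined upstream, as in the original script): actually run each configured algorithm and compare the probability estimates.

for experiment in experiments:
    # Same seed for every experiment, so the comparison is fair
    ot.RandomGenerator.SetSeed(0)
    myAlgo = ot.ProbabilitySimulationAlgorithm(myEvent, experiment)
    myAlgo.setMaximumOuterSampling(250)
    myAlgo.setBlockSize(4)
    myAlgo.setMaximumCoefficientOfVariation(0.1)
    myAlgo.run()
    result = myAlgo.getResult()
    print(experiment.getClassName(), "pf =", result.getProbabilityEstimate())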
Example #6
def run_ImportanceSampling(
    event,
    pstar,
    sd=1.0,
    coefVar=0.05,
    outerSampling=1000,
    blockSize=10,
    seed=1234,
    verbose=False,
    failure_domain=None,
):
    """
    Run an importance sampling simulation.

    Parameters
    ----------
    event : openturns.Event
        The failure event, or a list of failure events.
    pstar : list of points
        Design points in the standard space on which the instrumental
        distribution is centered.
    sd : positive float
        The standard deviation of the instrumental distribution.
    coefVar : float
        The target coefficient of variation.
    outerSampling : int
        The maximum number of outer iterations.
        Number of evaluations = outerSampling x blockSize.
    blockSize : int
        The number of samples evaluated simultaneously.
    seed : int
        Seed for the openturns random generator.
    verbose : bool
        If True, print the result summary.
    failure_domain : string
        Type of failure domain: either 'union' or 'intersection'. Only
        needed if the event is a list.

    Returns
    -------
    simulation : openturns.ProbabilitySimulationAlgorithm
        The simulation algorithm, after it has been run.
    """

    # case with the limit state defined as an intersection
    # or a union of the event
    if type(event) is list:
        n_event = len(event)
        antecedent = event[0].getAntecedent()

        if failure_domain == "union":

            def function_union(X):
                # Indicator of the union: at least one event has failed
                sample = ot.Sample(X.getSize(), n_event)
                for i in range(n_event):
                    sample[:, i] = event[i].getFunction()(X)

                sample = np.array(sample)
                for i in range(n_event):
                    op = event[i].getOperator().getImplementation()
                    if op.getClassName() in ("Less", "LessOrEqual"):
                        sample[:, i] = sample[:, i] < event[i].getThreshold()
                    elif op.getClassName() in ("Greater", "GreaterOrEqual"):
                        sample[:, i] = sample[:, i] >= event[i].getThreshold()
                return np.atleast_2d(sample.sum(axis=1)).T

            model = ot.PythonFunction(
                event[0].getFunction().getInputDimension(),
                event[0].getFunction().getOutputDimension(),
                func_sample=function_union,
            )
            output = ot.RandomVector(model, antecedent)
            new_event = ot.ThresholdEvent(output, ot.Greater(), 0.0)

        elif failure_domain == "intersection":

            def function_intersection(X):
                # Indicator of the intersection: all events have failed
                sample = ot.Sample(X.getSize(), n_event)
                for i in range(n_event):
                    sample[:, i] = event[i].getFunction()(X)

                sample = np.array(sample)
                for i in range(n_event):
                    op = event[i].getOperator().getImplementation()
                    if op.getClassName() in ("Less", "LessOrEqual"):
                        sample[:, i] = sample[:, i] < event[i].getThreshold()
                    elif op.getClassName() in ("Greater", "GreaterOrEqual"):
                        sample[:, i] = sample[:, i] >= event[i].getThreshold()
                return np.atleast_2d(sample.prod(axis=1)).T

            model = ot.PythonFunction(
                event[0].getFunction().getInputDimension(),
                event[0].getFunction().getOutputDimension(),
                func_sample=function_intersection,
            )
            output = ot.RandomVector(model, antecedent)
            new_event = ot.ThresholdEvent(output, ot.Greater(), 0.0)
    else:
        model = event.getFunction()
        new_event = event

    # Initialize the random generator
    ot.RandomGenerator.SetSeed(seed)

    dim = model.getInputDimension()
    pstar = np.atleast_2d(pstar)
    nPoint = pstar.shape[0]

    stdev = [sd] * dim
    corr = ot.IdentityMatrix(dim)
    if nPoint > 1:
        distribution_list = list()
        for point in pstar:
            distribution_list.append(ot.Normal(point, stdev, corr))
        instrumental_distribution = ot.Mixture(distribution_list)
    elif nPoint == 1:
        instrumental_distribution = ot.Normal(pstar[0], stdev, corr)

    # Run importance sampling simulation
    experiment = ot.ImportanceSamplingExperiment(instrumental_distribution)
    simulation = ot.ProbabilitySimulationAlgorithm(ot.StandardEvent(new_event),
                                                   experiment)
    simulation.setMaximumOuterSampling(outerSampling)
    simulation.setBlockSize(blockSize)
    simulation.setMaximumCoefficientOfVariation(coefVar)

    simulation.run()

    result = simulation.getResult()

    # Gather the results in a one-column summary table
    dfResult = pd.DataFrame(
        [
            result.getProbabilityEstimate(),
            result.getCoefficientOfVariation(),
            result.getConfidenceLength(),
            result.getOuterSampling() * result.getBlockSize(),
        ],
        index=[
            "Probability of failure",
            "Coefficient of variation",
            "95 % Confidence length",
            "Number of calls",
        ],
    )
    dfResult = dfResult.reset_index()
    dfResult.columns = ["", "Results - Importance Sampling"]

    if verbose:
        print(dfResult, "\n")

    return simulation
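
A minimal usage sketch for run_ImportanceSampling (hedged: the limit state, event, and design point below are illustrative assumptions, not from the original module; the function itself expects numpy as np, pandas as pd and openturns as ot at module level):

import numpy as np
import openturns as ot
import pandas as pd

# Illustrative two-dimensional example: P(X1 + X2 > 5) with X ~ N(0, I2).
X = ot.RandomVector(ot.Normal(2))
g = ot.SymbolicFunction(["x1", "x2"], ["x1 + x2"])
Y = ot.CompositeRandomVector(g, X)
event = ot.ThresholdEvent(Y, ot.Greater(), 5.0)
# Design point of this linear limit state, known in closed form here.
pstar = [2.5, 2.5]
simulation = run_ImportanceSampling(event, pstar, verbose=True)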
Example #7
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
ot.RandomGenerator.SetSeed(0)

# Generate sample with the given plane
distribution = ot.ComposedDistribution([ot.Uniform(0, 1)] * 2)
size = 10
weightingDistribution = ot.ComposedDistribution([ot.Uniform(0, 1)] * 2)
experiment = ot.ImportanceSamplingExperiment(
    distribution, weightingDistribution, size)
sample = experiment.generate()

# Create an empty graph
graph = ot.Graph("Importance sampling experiment", "x1", "x2", True, "")

# Create the cloud
cloud = ot.Cloud(sample, "blue", "fsquare", "")

# Then, draw it
graph.add(cloud)

fig = plt.figure(figsize=(4, 4))
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
View(graph, figure=fig, axes=[axis], add_legend=False)


def myImportanceSamplingExperiment(distribution, size, model):
    experiment = ot.ImportanceSamplingExperiment(distribution, distribution,
                                                 size)
    # Expected to fail: Sobol' indices estimation requires an independent
    # sampling design, so this construction should raise, and the enclosing
    # test counts that as a success.
    sensitivity_algorithm = ot.SaltelliSensitivityAlgorithm(experiment, model)
    return sensitivity_algorithm