Example #1
    def test_UseCaseFORM(self):
        problem = otb.ReliabilityProblem14()
        event = problem.getEvent()
        distribution = event.getAntecedent().getDistribution()
        # We create a NearestPoint algorithm
        myCobyla = ot.Cobyla()
        # Resolution options:
        eps = 1e-3
        myCobyla.setMaximumEvaluationNumber(100)
        myCobyla.setMaximumAbsoluteError(eps)
        myCobyla.setMaximumRelativeError(eps)
        myCobyla.setMaximumResidualError(eps)
        myCobyla.setMaximumConstraintError(eps)
        # Run FORM, starting from the mean of the input distribution
        algo = ot.FORM(myCobyla, event, distribution.getMean())
        algo.run()
        resultFORM = algo.getResult()
        # Combine with Importance Sampling
        standardSpaceDesignPoint = resultFORM.getStandardSpaceDesignPoint()
        dimension = distribution.getDimension()
        myImportance = ot.Normal(dimension)
        myImportance.setMean(standardSpaceDesignPoint)
        experiment = ot.ImportanceSamplingExperiment(myImportance)
        standardEvent = ot.StandardEvent(event)
        algo = ot.ProbabilitySimulationAlgorithm(standardEvent, experiment)
        algo.setMaximumCoefficientOfVariation(0.01)
        algo.setBlockSize(int(1.0e3))
        algo.setMaximumOuterSampling(int(1e3))
        algo.run()
        result = algo.getResult()
        computed_pf = result.getProbabilityEstimate()
        exact_pf = problem.getProbability()
        print("exact_pf=", exact_pf)
        print("computed_pf=", computed_pf)
        samplesize = result.getOuterSampling() * result.getBlockSize()
        alpha = 0.05
        pflen = result.getConfidenceLength(1 - alpha)
        print(
            "%.2f%% confidence interval = [%f,%f]"
            % ((1 - alpha) * 100, computed_pf - pflen / 2, computed_pf + pflen / 2)
        )
        print("Sample size : ", samplesize)
        # Absolute tolerance scaled with the Monte Carlo rate 1 / sqrt(n)
        atol = 1.0e1 / np.sqrt(samplesize)
        np.testing.assert_allclose(computed_pf, exact_pf, atol=atol)
    def buildFORMIS(self, problem, nearestPointAlgorithm):
        """
        Creates a FORM-IS algorithm.

        We first create a FORM object based on the given nearest point
        algorithm and run it to get the design point in the standard space.
        Then we create an ImportanceSamplingExperiment based on the Gaussian
        distribution, centered on the design point.
        Finally, we create a ProbabilitySimulationAlgorithm.

        Parameters
        ----------
        problem : ot.ReliabilityBenchmarkProblem
            The problem.
        nearestPointAlgorithm : ot.OptimizationAlgorithm
            Optimization algorithm used to search the design point.

        Returns
        -------
        algo : ot.ProbabilitySimulationAlgorithm
            The FORM-IS algorithm for estimating the probability.
        """
        event = problem.getEvent()
        inputVector = event.getAntecedent()
        myDistribution = inputVector.getDistribution()
        physicalStartingPoint = myDistribution.getMean()
        algoFORM = ot.FORM(nearestPointAlgorithm, event, physicalStartingPoint)
        algoFORM.run()
        resultFORM = algoFORM.getResult()
        standardSpaceDesignPoint = resultFORM.getStandardSpaceDesignPoint()
        d = myDistribution.getDimension()
        myImportance = ot.Normal(d)
        myImportance.setMean(standardSpaceDesignPoint)
        experiment = ot.ImportanceSamplingExperiment(myImportance)
        standardEvent = ot.StandardEvent(event)
        algo = ot.ProbabilitySimulationAlgorithm(standardEvent, experiment)
        return algo
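
# Usage sketch (illustrative): assuming `factory` is an instance of the class
# providing buildFORMIS above and `problem` is an otbenchmark reliability
# problem, the returned algorithm still has to be configured and run before
# the probability estimate can be read:
#
#     algo = factory.buildFORMIS(problem, ot.AbdoRackwitz())
#     algo.setMaximumOuterSampling(1000)
#     algo.setBlockSize(100)
#     algo.run()
#     print(algo.getResult().getProbabilityEstimate())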
# %%
myImportance = ot.Normal(dimension)
myImportance.setMean(standardSpaceDesignPoint)
myImportance

# %%
# Create the design of experiment corresponding to importance sampling. This generates a `WeightedExperiment` with weights corresponding to the importance distribution.

# %%
experiment = ot.ImportanceSamplingExperiment(myImportance)

# %%
# Create the standard event corresponding to the event. This transforms the original problem into the U-space, with independent Gaussian marginals.

# %%
standardEvent = ot.StandardEvent(myEvent)

# %%
# We then create the simulation algorithm.

# %%
algo = ot.ProbabilitySimulationAlgorithm(standardEvent, experiment)
algo.setMaximumCoefficientOfVariation(cv)
algo.setMaximumOuterSampling(40000)

# %%
# For statistics about the algorithm
initialNumberOfCall = limitStateFunction.getEvaluationCallsNumber()

# %%
algo.run()
print("Number of evaluations of the limit-state function: %s" %
      g.getInputHistory().getSize())
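
# %%
# A minimal sketch (under the same setup) of how the estimate can be read back
# from the algorithm once it has run:

# %%
result = algo.getResult()
print("Probability estimate: %.3e" % result.getProbabilityEstimate())
print("Coefficient of variation: %.2f" % result.getCoefficientOfVariation())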

# *Most-probable-failure-point*-based importance sampling

# In[32]:

g.clearHistory()

# In[33]:

instrumental_distribution = ot.Normal(
    FORM_result.getStandardSpaceDesignPoint(),
    ot.CovarianceMatrix(X_distribution.getDimension()))
IS_experiment = ot.ImportanceSamplingExperiment(instrumental_distribution)
IS_algorithm = ot.ProbabilitySimulationAlgorithm(ot.StandardEvent(event),
                                                 IS_experiment)
IS_algorithm.setMaximumOuterSampling(40000)
IS_algorithm.setBlockSize(1)
IS_algorithm.setMaximumCoefficientOfVariation(.1)
IS_algorithm.run()
IS_result = IS_algorithm.getResult()

# In[34]:

print("Probability estimate: %.3e" % IS_result.getProbabilityEstimate())
print("Coefficient of variation: %.2f" % IS_result.getCoefficientOfVariation())
print("Number of evaluations: %d" % g.getInputHistory().getSize())

# %%
myImportance = ot.Normal(dimension)
myImportance.setMean(standardSpaceDesignPoint)
myImportance

# %%
# Create the design of experiment corresponding to importance sampling. This generates a `WeightedExperiment` with weights corresponding to the importance distribution.

# %%
experiment = ot.ImportanceSamplingExperiment(myImportance)
type(experiment)

# %%
# Create the standard event corresponding to the event. This transforms the original problem into the U-space, with independent Gaussian marginals.

# %%
standardEvent = ot.StandardEvent(event)

# %%
# Run the importance sampling simulation
# --------------------------------------

# %%
# We then create the simulation algorithm.

# %%
algo = ot.ProbabilitySimulationAlgorithm(standardEvent, experiment)
algo.setMaximumCoefficientOfVariation(0.1)
algo.setMaximumOuterSampling(40000)
algo.run()
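
# %%
# A minimal sketch (under the same setup) of how the results could then be
# retrieved and a 95% confidence interval computed:

# %%
result = algo.getResult()
probability = result.getProbabilityEstimate()
length = result.getConfidenceLength(0.95)
print("Pf = %.3e" % probability)
print(
    "95%% confidence interval = [%.3e, %.3e]"
    % (probability - length / 2, probability + length / 2)
)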

Example #6

import numpy as np
import openturns as ot
import pandas as pd

def run_ImportanceSampling(
    event,
    pstar,
    sd=1.0,
    coefVar=0.05,
    outerSampling=1000,
    blockSize=10,
    seed=1234,
    verbose=False,
    failure_domain=None,
):
    """
    Run an importance sampling simulation.

    Parameters
    ----------
    event : openturns.ThresholdEvent or list of openturns.ThresholdEvent
        The failure event, or a list of events combined according to
        failure_domain.
    pstar : list of points
        Design points in the standard space on which the instrumental
        distribution is centered.
    sd : positive float
        The standard deviation of the instrumental distribution.
    coefVar : float
        The target coefficient of variation.
    outerSampling : int
        The maximum number of outer iterations.
        Total number of evaluations = outerSampling x blockSize.
    blockSize : int
        The number of samples evaluated simultaneously in each block.
    seed : int
        Seed for the openturns random generator.
    verbose : bool
        If True, print the simulation results.
    failure_domain : string
        Type of failure domain : either 'union' or 'intersection'. Only
        needed if the event is a list.

    Returns
    -------
    simulation : openturns.ProbabilitySimulationAlgorithm
        The simulation algorithm, after it has been run.
    """

    # case with the limit state defined as an intersection
    # or a union of the event
    if type(event) is list:
        n_event = len(event)
        antecedent = event[0].getAntecedent()

        if failure_domain == "union":

            def function_union(X):
                sample = ot.Sample(X.getSize(), n_event)
                for i in range(n_event):
                    sample[:, i] = event[i].getFunction()(X)

                sample = np.array(sample)
                for i in range(n_event):
                    # Map each margin to a failure indicator according to the
                    # comparison operator of the corresponding event
                    operatorName = (
                        event[i].getOperator().getImplementation().getClassName()
                    )
                    if operatorName in ("Less", "LessOrEqual"):
                        sample[:, i] = sample[:, i] < event[i].getThreshold()
                    if operatorName in ("Greater", "GreaterOrEqual"):
                        sample[:, i] = sample[:, i] >= event[i].getThreshold()
                return np.atleast_2d(sample.sum(axis=1)).T

            model = ot.PythonFunction(
                event[0].getFunction().getInputDimension(),
                event[0].getFunction().getOutputDimension(),
                func_sample=function_union,
            )
            output = ot.CompositeRandomVector(model, antecedent)
            new_event = ot.ThresholdEvent(output, ot.Greater(), 0.0)

        elif failure_domain == "intersection":

            def function_intersection(X):
                sample = ot.Sample(X.getSize(), n_event)
                for i in range(n_event):
                    sample[:, i] = event[i].getFunction()(X)

                sample = np.array(sample)
                for i in range(n_event):
                    # Map each margin to a failure indicator according to the
                    # comparison operator of the corresponding event
                    operatorName = (
                        event[i].getOperator().getImplementation().getClassName()
                    )
                    if operatorName in ("Less", "LessOrEqual"):
                        sample[:, i] = sample[:, i] < event[i].getThreshold()
                    if operatorName in ("Greater", "GreaterOrEqual"):
                        sample[:, i] = sample[:, i] >= event[i].getThreshold()
                return np.atleast_2d(sample.prod(axis=1)).T

            model = ot.PythonFunction(
                event[0].getFunction().getInputDimension(),
                event[0].getFunction().getOutputDimension(),
                func_sample=function_intersection,
            )
            output = ot.CompositeRandomVector(model, antecedent)
            new_event = ot.ThresholdEvent(output, ot.Greater(), 0.0)
    else:
        model = event.getFunction()
        new_event = event

    # Initialize the random generator
    ot.RandomGenerator.SetSeed(seed)

    dim = model.getInputDimension()
    pstar = np.atleast_2d(pstar)
    nPoint = pstar.shape[0]

    stdev = [sd] * dim
    corr = ot.IdentityMatrix(dim)
    if nPoint > 1:
        distribution_list = list()
        for point in pstar:
            distribution_list.append(ot.Normal(point, stdev, corr))
        instrumental_distribution = ot.Mixture(distribution_list)
    elif nPoint == 1:
        instrumental_distribution = ot.Normal(pstar[0], stdev, corr)

    # Run importance sampling simulation
    experiment = ot.ImportanceSamplingExperiment(instrumental_distribution)
    simulation = ot.ProbabilitySimulationAlgorithm(ot.StandardEvent(new_event),
                                                   experiment)
    simulation.setMaximumOuterSampling(outerSampling)
    simulation.setBlockSize(blockSize)
    simulation.setMaximumCoefficientOfVariation(coefVar)

    # try:
    simulation.run()
    # except Exception as e:
    #     dump_cache(model, 'Cache/physicalModelMathFunction')
    #     raise e

    result = simulation.getResult()

    dfResult = pd.concat(
        [
            pd.DataFrame(
                [result.getProbabilityEstimate()],
                index=["Probability of failure"],
            ),
            pd.DataFrame(
                [result.getCoefficientOfVariation()],
                index=["Coefficient of variation"],
            ),
            pd.DataFrame(
                [result.getConfidenceLength()],
                index=["95 % Confidence length"],
            ),
            pd.DataFrame(
                [result.getOuterSampling() * result.getBlockSize()],
                index=["Number of calls"],
            ),
        ]
    )
    dfResult = dfResult.reset_index()
    dfResult.columns = ["", "Results - Importance Sampling"]

    if verbose:
        print(dfResult, "\n")

    return simulation
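

# Minimal usage sketch (illustrative, with assumed inputs): a linear limit
# state with two independent standard Gaussian inputs, whose standard-space
# design point is known in closed form and is passed directly as pstar.
if __name__ == "__main__":
    g = ot.SymbolicFunction(["x1", "x2"], ["x1 + x2"])
    X = ot.RandomVector(ot.Normal(2))
    Y = ot.CompositeRandomVector(g, X)
    event = ot.ThresholdEvent(Y, ot.Less(), -4.0)
    # For this linear limit state the U-space design point is (-2, -2)
    pstar = [-2.0, -2.0]
    simulation = run_ImportanceSampling(
        event, pstar, sd=1.0, coefVar=0.05, outerSampling=1000, blockSize=10,
        verbose=True,
    )
    print("Pf =", simulation.getResult().getProbabilityEstimate())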