Example #1
def interpret_complete_cdf(
    cdfs_p: List[Union[list, np.ndarray]],
    cdfs_v: List[Union[list, np.ndarray]],
    distribution: str = None,
) -> Union[List[Union[list, np.ndarray]], Tuple[List[Union[list, np.ndarray]],
                                                List[Union[list, np.ndarray]]],
           ot.DistributionImplementation, ]:
    """Interpret the given points on the cumulative distribution function to represent a complete CDF. The default
    policy is to assume discrete probabilities.
    If a distribution name is specified, the CDF is returned as an openturns distribution object.
    Supported openturns distributions are the following:
    - discrete: all residual probability is attributed to the highest given value
    - normal or gaussian: derived from the first two point only
    - uniform: interpolates linearly between points, with residual probability attributed to the min and max values
    """
    # Todo: refactor, currently too many possible types of output

    if distribution is None:
        for cdf_p in cdfs_p:
            cdf_p[-1] = 1  # force the CDF to reach 1 at the last point
        return cdfs_p, cdfs_v
    cdfs = []
    if distribution == "discrete":
        for cdf_p, cdf_v in zip(cdfs_p, cdfs_v):
            cdf_p[-1] = 1  # force the CDF to reach 1 at the last point
            cdfs.append(ot.UserDefined([[v] for v in cdf_v], cp_to_p(cdf_p)))
    elif distribution in ["normal", "gaussian"]:
        for cdf_p, cdf_v in zip(cdfs_p, cdfs_v):
            if len(cdf_v) > 1:
                x1 = cdf_v[0]
                x2 = cdf_v[1]
                y1 = cdf_p[0]
                y2 = cdf_p[1]
                mu = (x1 * pyerf.erfinv(1 - 2 * y2) -
                      x2 * pyerf.erfinv(1 - 2 * y1)) / (
                          pyerf.erfinv(1 - 2 * y2) - pyerf.erfinv(1 - 2 * y1))
                sigma = (2**0.5 * x1 -
                         2**0.5 * x2) / (2 * pyerf.erfinv(1 - 2 * y2) -
                                         2 * pyerf.erfinv(1 - 2 * y1))
                cdfs.append(ot.Normal(mu, sigma))
            else:
                cdfs.append(ot.UserDefined([[v] for v in cdf_v], cdf_p))
    elif distribution == "uniform":
        for cdf_p, cdf_v in zip(cdfs_p, cdfs_v):
            if len(cdf_v) == 1:
                cdfs.append(ot.UserDefined([cdf_v]))
            elif len(cdf_v) > 1:
                coll = ([ot.UserDefined([[cdf_v[0]]])] + [
                    ot.Uniform(float(cdf_v[i]), float(cdf_v[i + 1]))
                    for i in range(len(cdf_v) - 1)
                ] + [ot.UserDefined([[cdf_v[-1]]])])
                weights = np.append(cp_to_p(cdf_p), 1 - cdf_p[-1])
                cdfs.append(ot.Mixture(coll, weights))
    else:
        raise NotImplementedError("Unsupported distribution: %s" % distribution)
    return cdfs
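
# A minimal usage sketch (illustrative values; assumes `ot` and the module
# helper `cp_to_p` used above are importable):
cdfs_p = [[0.2, 0.7, 0.9]]
cdfs_v = [[10.0, 20.0, 30.0]]
p, v = interpret_complete_cdf(cdfs_p, cdfs_v)  # default: discrete probabilities
dists = interpret_complete_cdf(cdfs_p, cdfs_v, distribution="discrete")
print(dists[0].getMean())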
Example #2
    def _computePODSamplePerDefect(self, defect, detection, krigingResult,
                                   transformation, distribution,
                                   simulationSize, samplingSize):
        """
        Compute the POD sample for a defect size.
        """

        dim = distribution.getDimension()
        # create a distribution with a Dirac distribution for the defect size
        diracDist = [ot.Dirac(defect)]
        diracDist += [distribution.getMarginal(i + 1) for i in range(dim - 1)]
        distribution = ot.ComposedDistribution(diracDist)

        # create a sample for the Monte Carlo simulation and confidence interval
        MC_sample = distribution.getSample(samplingSize)
        # Kriging_RV = ot.KrigingRandomVector(krigingResult, MC_sample)
        # Y_sample = Kriging_RV.getSample(simulationSize)
        Y_sample = self._randomVectorSampling(krigingResult,
                                              transformation(MC_sample),
                                              simulationSize, samplingSize)

        # compute the POD for each simulation
        POD_MCPG_a = np.mean(Y_sample > detection, axis=1)
        # compute the variance of the MC simulation using the CLT
        VAR_TCL = np.array(POD_MCPG_a) * (
            1 - np.array(POD_MCPG_a)) / Y_sample.shape[1]
        # Create distribution of the POD estimator for all simulation
        POD_PG_dist = []
        for i in range(simulationSize):
            if VAR_TCL[i] > 0:
                POD_PG_dist += [ot.Normal(POD_MCPG_a[i], np.sqrt(VAR_TCL[i]))]
            else:
                if POD_MCPG_a[i] < 1:
                    POD_PG_dist += [ot.Dirac([0.])]
                else:
                    POD_PG_dist += [ot.Dirac([1.])]
        POD_PG_alea = ot.Mixture(POD_PG_dist)
        # get a sample of these distributions
        POD_PG_sample = POD_PG_alea.getSample(simulationSize * samplingSize)

        return POD_PG_sample
    def compute_aux_distribution(self, sample, weights):

        neff = np.sum(weights)**2 / np.sum(weights**2)  # effective sample size

        # computation of bandwidth using Silverman rule
        silverman = sample.computeStandardDeviationPerComponent() * (
            neff * (self.dim + 2) / 4.)**(-1. / (self.dim + 4))

        margins = []

        # computation of auxiliary distribution using ot.Mixture
        for k in range(self.dim):
            dist_coll = []
            for i in range(self.n_IS):
                dist_coll.append(ot.Normal(sample[i][k], silverman[k]))

            distri_margin = ot.Mixture(dist_coll, weights.tolist()[0])
            margins.append(distri_margin)

        aux_distrib = ot.ComposedDistribution(margins)
        return aux_distrib
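
# A self-contained sketch of the same construction (illustrative values,
# outside the class): a per-component Gaussian mixture centered on weighted
# sample points, with a Silverman-type bandwidth. Note that newer openturns
# releases rename computeStandardDeviationPerComponent to
# computeStandardDeviation.
import numpy as np
import openturns as ot

n_IS = 50
sample = ot.Normal(2).getSample(n_IS)
weights = np.full(n_IS, 1.0 / n_IS)  # uniform importance weights
neff = np.sum(weights) ** 2 / np.sum(weights ** 2)  # effective sample size
dim = sample.getDimension()
silverman = sample.computeStandardDeviationPerComponent() * (
    neff * (dim + 2) / 4.0) ** (-1.0 / (dim + 4))
margins = [
    ot.Mixture([ot.Normal(sample[i][k], silverman[k]) for i in range(n_IS)],
               weights.tolist()) for k in range(dim)
]
aux_distrib = ot.ComposedDistribution(margins)
print(aux_distrib.getMean())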
    def test_CrossCutDistribution2(self):
        # Create a Funky distribution
        corr = ot.CorrelationMatrix(2)
        corr[0, 1] = 0.2
        copula = ot.NormalCopula(corr)
        x1 = ot.Normal(-1.0, 1.0)
        x2 = ot.Normal(2.0, 1.0)
        x_funk = ot.ComposedDistribution([x1, x2], copula)
        # Create a Punk distribution
        x1 = ot.Normal(1.0, 1.0)
        x2 = ot.Normal(-2.0, 1.0)
        x_punk = ot.ComposedDistribution([x1, x2], copula)
        distribution = ot.Mixture([x_funk, x_punk], [0.5, 1.0])
        referencePoint = distribution.getMean()
        crossCut = otbenchmark.CrossCutDistribution(distribution)
        # Avoid failing on CircleCI
        # _tkinter.TclError: no display name and no $DISPLAY environment variable
        try:
            _ = crossCut.drawMarginalPDF()
            _ = crossCut.drawConditionalPDF(referencePoint)
        except Exception as e:
            print(e)
aCollection = ot.DistributionCollection()

aCollection.add(ot.Normal(meanPoint, sigma, ot.IdentityMatrix(dimension)))
sample.add(meanPoint)
meanPoint += [1.0] * dimension
aCollection.add(ot.Normal(meanPoint, sigma, ot.IdentityMatrix(dimension)))
sample.add(meanPoint)
meanPoint += [1.0] * dimension
aCollection.add(ot.Normal(meanPoint, sigma, ot.IdentityMatrix(dimension)))
sample.add(meanPoint)

# Instantiate one distribution object
distribution = ot.KernelMixture(ot.Normal(), sigma, sample)
print("Distribution ", repr(distribution))
print("Distribution ", distribution)
distributionRef = ot.Mixture(aCollection)
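
# Sanity check (sketch): the KernelMixture and the explicit Mixture built
# above define the same distribution, so their PDFs should agree.
point = distributionRef.getMean()
print("KernelMixture PDF =", distribution.computePDF(point))
print("Mixture PDF       =", distributionRef.computePDF(point))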

# Is this distribution elliptical?
print("Elliptical = ", distribution.isElliptical())

# Is this distribution continuous?
print("Continuous = ", distribution.isContinuous())

# Test for realization of distribution
oneRealization = distribution.getRealization()
print("oneRealization=", repr(oneRealization))

# Test for sampling
size = 100
oneSample = distribution.getSample(size)
print("oneSample first=", repr(oneSample[0]), " last=",
Example #7
# Discretization on the grid [mu, mu + sigma]
newGrid = boxGrid.generate()
# scaling box grid
newGrid *= dist_2D.getStandardDeviation()
# translating
newGrid += dist_2D.getMean()
# Compute PDF
for index in range(newGrid.getSize()):
    point = newGrid[index]
    PDF = dist_2D.computePDF(point)
    print("pdf      = %.6g" % PDF)

# 3D test
ot.ResourceMap.SetAsUnsignedInteger("RandomMixture-DefaultMaxSize", 8290688)
mixture = ot.Mixture([ot.Normal(2, 1), ot.Normal(-2, 1)])
collection = [ot.Normal(0.0, 1.0), mixture, ot.Uniform(0, 1), ot.Uniform(0, 1)]
matrix = ot.Matrix([[1, -0.05, 1, -0.5], [0.5, 1, -0.05, 0.3],
                    [-0.5, -0.1, 1.2, -0.8]])
dist_3D = ot.RandomMixture(collection, matrix)
dist_3D.setBlockMin(3)
dist_3D.setBlockMax(6)

print("3D distribution = ", dist_3D)
print("range = ", dist_3D.getRange())
print("mean = ", dist_3D.getMean())
print("cov = ", dist_3D.getCovariance())
print("sigma = ", dist_3D.getStandardDeviation())
# Total number of points is (2+2)**3
# The test is CPU-intensive
N = 2
def CostSobol(MyModel, p, m, lower, upper, distribution, indexNumber,
              indexChoice, NSobol, MINMAX):
    '''
    Return the Sobol' index associated with the measure recovered from the
    canonical moment sequences.
    '''
    dim = len(lower)
    # Split p into one block of coefficients per variable
    if len(m) == dim:
        pp = []
        t = 0
        for i in range(dim):
            pp.append(p[t:t + len(m[i]) + 1])
            t = t + len(m[i]) + 1
    else:
        raise ValueError('size of moment vector does not match the dimension')

    if indexChoice == 1:
        P = list(
            QD_Algorithm(
                Affine_Transformation(lower[indexNumber], upper[indexNumber],
                                      m[indexNumber]))) + list(pp[indexNumber])
        Position, Weight = Canonical_to_Position([lower[indexNumber]],
                                                 [upper[indexNumber]], P)

        distribution[indexNumber] = ot.Mixture(
            [ot.Dirac(Position[i]) for i in range(len(Position))], Weight)
        composedDistribution = ot.ComposedDistribution(distribution)
        ot.RandomGenerator.SetSeed(0)
        inputDesign = ot.SobolIndicesExperiment(composedDistribution, NSobol,
                                                True).generate()
        outputDesign = MyModel(inputDesign)

        sensitivityAnalysis = ot.SaltelliSensitivityAlgorithm(
            inputDesign, outputDesign, NSobol)
        firstOrder = sensitivityAnalysis.getFirstOrderIndices()
        return MINMAX * firstOrder[indexNumber]

    elif indexChoice == 0:
        t = 0
        P = [[]] * (dim - 1)
        Position = [[]] * (dim - 1)
        Weight = [[]] * (dim - 1)
        for i in range(dim):
            if i != indexNumber:
                P[t] = list(
                    QD_Algorithm(
                        Affine_Transformation(lower[i], upper[i],
                                              m[i]))) + list(pp[i])
                Position[t], Weight[t] = Canonical_to_Position([lower[i]],
                                                               [upper[i]],
                                                               P[t])
                distribution[i] = ot.Mixture([
                    ot.Dirac(Position[t][j]) for j in range(len(Position[t]))
                ], Weight[t])
                t += 1
        composedDistribution = ot.ComposedDistribution(distribution)
        ot.RandomGenerator.SetSeed(0)
        inputDesign = ot.SobolIndicesExperiment(composedDistribution, NSobol,
                                                True).generate()
        outputDesign = MyModel(inputDesign)

        sensitivityAnalysis = ot.SaltelliSensitivityAlgorithm(
            inputDesign, outputDesign, NSobol)
        totalOrder = sensitivityAnalysis.getTotalOrderIndices()
        return MINMAX * totalOrder[indexNumber]
Example #9
sigma[1] = 3.0
R = ot.CorrelationMatrix(dimension)
for i in range(1, dimension):
    R[i, i - 1] = 0.5

# Create a collection of distributions
aCollection = ot.DistributionCollection()

aCollection.add(ot.Normal(meanPoint, sigma, R))
meanPoint += ot.Point(meanPoint.getDimension(), 1.0)
aCollection.add(ot.Normal(meanPoint, sigma, R))
meanPoint += ot.Point(meanPoint.getDimension(), 1.0)
aCollection.add(ot.Normal(meanPoint, sigma, R))

# Instantiate one distribution object
distribution = ot.Mixture(aCollection, ot.Point(aCollection.getSize(), 2.0))
print("Distribution ", repr(distribution))
print("Weights = ", repr(distribution.getWeights()))
weights = distribution.getWeights()
weights[0] = 2.0 * weights[0]
distribution.setWeights(weights)
print("After update, new weights = ", repr(distribution.getWeights()))
distribution = ot.Mixture(aCollection)
print("Distribution ", repr(distribution))

# Is this distribution elliptical?
print("Elliptical = ", distribution.isElliptical())

# Is this distribution continuous?
print("Continuous = ", distribution.isContinuous())
Example #10
copula = ot.NormalCopula(corr)
x1 = ot.Normal(-1., 1)
x2 = ot.Normal(2, 1)
x_funk = ot.ComposedDistribution([x1, x2], copula)

# %%
# Create a Punk distribution
x1 = ot.Normal(1., 1)
x2 = ot.Normal(-2, 1)
x_punk = ot.ComposedDistribution([x1, x2], copula)

# %%
# Let us mix these two distributions.

# %%
mixture = ot.Mixture([x_funk, x_punk], [0.5, 1.])

# %%
n = 500
sample = mixture.getSample(n)

# %%
graph = ot.Graph("n=%d" % (n), "X1", "X2", True, '')
cloud = ot.Cloud(sample)
graph.add(cloud)
view = viewer.View(graph)

# %%
# We sometimes want to customize the graphics by choosing the type of point (square, triangle, circle, etc.), the type of line (continuous, dashed, etc.) or some other parameter. The list of possible values is given by the corresponding `getValid` method.
#
# For example, the following call returns the possible values of the `PointStyle` parameter.
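# %%
# A sketch of such a call (`GetValidPointStyles` is a static method of
# `ot.Drawable`):
print(ot.Drawable.GetValidPointStyles())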
Example #11
import openturns.viewer as viewer
from matplotlib import pylab as plt
ot.Log.Show(ot.Log.NONE)

# %%
# create a collection of distributions and the associated weights
# (the weights need not sum to 1: `Mixture` renormalizes them)
distributions = [
    ot.Triangular(1.0, 2.0, 4.0),
    ot.Normal(-1.0, 1.0),
    ot.Uniform(5.0, 6.0)
]
weights = [0.4, 1.0, 0.2]

# %%
# create the mixture
distribution = ot.Mixture(distributions, weights)
print(distribution)

# %%
# draw PDF
graph = distribution.drawPDF()
view = viewer.View(graph)

# %%
# define a list of copulas and the associated weights
copulas = [ot.GumbelCopula(4.5), ot.ClaytonCopula(2.3)]
weights = [0.2, 0.8]

# %%
# create a mixture of copulas
distribution = ot.Mixture(copulas, weights)
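
# %%
# A convex combination of copulas is itself a copula; the resulting
# `Mixture` reflects this (sketch):
print(distribution.isCopula())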
graph.setTitle("Christian Robert tough density")
graph.setXTitle("")
graph.setYTitle("")
_ = View(graph)

# %%
# Independent Metropolis-Hastings
# -------------------------------
# Let us use a mixture distribution to approximate the target distribution.
#
# This approximation will serve as the instrumental distribution
# in the independent Metropolis-Hastings algorithm.

exp = ot.Exponential(1.0)
normal = ot.Normal(5.3, 0.4)
instrumentalDistribution = ot.Mixture([exp, normal], [0.9, 0.1])

# %%
# Compare the instrumental density to the target density.
graph = f.draw(lower_bound, upper_bound, 100)
graph.setTitle("Instrumental PDF")
graph.setXTitle("")
graph.setYTitle("")
graph.add(instrumentalDistribution.drawPDF(lower_bound, upper_bound, 100))
graph.setLegendPosition("topright")
graph.setLegends(["Unnormalized target density", "Instrumental PDF"])
_ = View(graph)

# %%
# :class:`~MetropolisHastings` and derived classes can work directly with the logarithm of the target density.
Example #13
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

R = ot.CorrelationMatrix(2)
R[0, 1] = -0.99
d1 = ot.Normal([-1.0, 1.0], [1.0, 1.0], R)
R[0, 1] = 0.99
d2 = ot.Normal([1.0, 1.0], [1.0, 1.0], R)
distribution = ot.Mixture([d1, d2], [1.0] * 2)
classifier = ot.MixtureClassifier(distribution)
f1 = ot.SymbolicFunction(['x'], ['-x'])
f2 = ot.SymbolicFunction(['x'], ['x'])
experts = ot.Basis([f1, f2])
moe = ot.ExpertMixture(experts, classifier)
moeNMF = ot.Function(moe)

print('Mixture of experts=', moe)

# Evaluate the mixture of experts on some points
for i in range(2):
    p = [-0.3 + 0.8 * i / 4.0]
    print('moe   ( %.6g )=' % p[0], moe(p))
    print('moeNMF( %.6g )=' % p[0], moeNMF(p))
# and on a sample
x = [[-0.3], [0.1]]
print('x=', ot.Sample(x), 'moeNMF(x)=', moeNMF(x))

# unsupervised mode (2d)
f1 = ot.SymbolicFunction(['x1', 'x2'], ['-8'])
lower_bound = 0.0
print(lower_bound)
upper_bound = 2.0 * pi
print(upper_bound)

# %%
# Independent Metropolis-Hastings
# -------------------------------
# Let us use a mixture distribution to approximate the target distribution.
#
# This approximation will serve as the instrumental distribution
# in the independent Metropolis-Hastings algorithm.

exp = ot.Exponential(1.0)
normal = ot.Normal(5.3, 0.4)
instrumentalDistribution = ot.Mixture([exp, normal], [0.9, 0.1])
print(instrumentalDistribution.getWeights())

# %%
# MetropolisHastings classes expect to receive the logarithm of the target density.

log_density = ot.SymbolicFunction(
    "x", "log(2 + sin(x)^2) - (2 + cos(3*x)^3 + sin(2*x)^3) * x")

initialState = ot.Point([3.0])  # not important in this case
support = ot.Interval([lower_bound], [upper_bound])
independentMH = ot.IndependentMetropolisHastings(log_density, support,
                                                 initialState,
                                                 instrumentalDistribution, [0])

print(independentMH.getRealization())
# Bandwidth selection
# -------------------
#
# We reproduce a classical example from the literature: the fitting of a bimodal distribution.
# We show the result of kernel smoothing with different bandwidth selection rules:
#
# - the Silverman rule
# - the plug-in bandwidth
# - the mixed bandwidth
#

# %%
# We define the bimodal distribution and generate a sample out of it.
X1 = ot.Normal(10.0, 1.0)
X2 = ot.Normal(-10.0, 1.0)
myDist = ot.Mixture([X1, X2])
sample = myDist.getSample(2000)

# %%
# We now compare the fitted distributions:
graph = myDist.drawPDF()
graph.setTitle("Kernel smoothing vs original")

# %%
# With the Silverman rule:
kernelSB = ot.KernelSmoothing()
bandwidthSB = kernelSB.computeSilvermanBandwidth(sample)
estimatedSB = kernelSB.build(sample, bandwidthSB)
kernelSB_plot = estimatedSB.drawPDF().getDrawable(0)
graph.add(kernelSB_plot)
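
# %%
# Sketch of the assumed continuation (the snippet above stops after the
# Silverman rule): the plug-in bandwidth.
kernelPB = ot.KernelSmoothing()
bandwidthPB = kernelPB.computePluginBandwidth(sample)
estimatedPB = kernelPB.build(sample, bandwidthPB)
graph.add(estimatedPB.drawPDF().getDrawable(0))

# %%
# With the mixed bandwidth:
kernelMB = ot.KernelSmoothing()
bandwidthMB = kernelMB.computeMixedBandwidth(sample)
estimatedMB = kernelMB.build(sample, bandwidthMB)
graph.add(estimatedMB.drawPDF().getDrawable(0))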
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View

mu = [2.0, 2.0]
sigma = [1.0, 1.0]
R = ot.CorrelationMatrix(2)
R[0, 1] = 0.8
myNormal1 = ot.Normal(mu, sigma, R)
R2 = ot.CorrelationMatrix(2)
R2[0, 1] = -0.8
mu2 = [3.0, 3.0]
myNormal2 = ot.Normal(mu2, sigma, R2)

myMixture = ot.Mixture([myNormal1, myNormal2], [0.3, 0.7])
myMixture.setDescription(['$x_1$', '$x_2$'])

graphPDF = myMixture.drawPDF()
graphCDF = myMixture.drawCDF()

fig = plt.figure(figsize=(8, 4))
pdf_axis = fig.add_subplot(121)
cdf_axis = fig.add_subplot(122)
pdf_axis.set_xlim(auto=True)
cdf_axis.set_xlim(auto=True)

View(graphPDF, figure=fig, axes=[pdf_axis], add_legend=True)
View(graphCDF, figure=fig, axes=[cdf_axis], add_legend=True)
fig.suptitle("Mixture: 0.3*Normal1 + 0.7*Normal2: pdf and cdf")
import openturns.testing as ott
import numpy as np

ot.TESTPREAMBLE()

ot.RandomGenerator.SetSeed(100)

# Sample data with :math:`\mu_0 = 0` and :math:`\mu_1 = 2.7`.
N = 500
p = 0.3

mu0 = 0.0
mu1 = 2.7
nor0 = ot.Normal(mu0, 1.0)
nor1 = ot.Normal(mu1, 1.0)
true_distribution = ot.Mixture([nor0, nor1], [1 - p, p])
observations = np.array(true_distribution.getSample(N))


def nor0post(pt):
    z = np.array(pt)[2:]
    x0 = observations[z == 0]
    mu0 = x0.sum() / (0.1 + len(x0))
    sigma0 = 1.0 / (0.1 + len(x0))
    return [mu0, sigma0]


def nor1post(pt):
    z = np.array(pt)[2:]
    x1 = observations[z == 1]
    mu1 = x1.sum() / (0.1 + len(x1))
    sigma1 = 1.0 / (0.1 + len(x1))
    return [mu1, sigma1]
# %%
graph = fit.drawPDF()
view = otv.View(graph)

# %%
# We see that the distribution of the merged sample has two modes, but they are not clearly distinct. To separate them, we could increase the sample size. It is also interesting to see whether the bandwidth selection rule can be chosen better: this is the purpose of the next section.

# %%
# Simulation based on a mixture
# -----------------------------
#
# Since the distribution we approximate is a mixture, it is more convenient to create it with the `Mixture` class, which takes as input arguments a list of distributions and a list of weights.

# %%
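# Illustrative placeholders (the original example defines the two modes,
# the weights and the sample size earlier, before this snippet starts):
distribution1 = ot.Normal(0.0, 1.0)
distribution2 = ot.Normal(5.0, 1.0)
w1, w2 = 0.6, 0.4
n = 1000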
distribution = ot.Mixture([distribution1, distribution2], [w1, w2])

# %%
# Then we generate a sample from it. 

# %%
sample = distribution.getSample(n)

# %%
factory = ot.KernelSmoothing()
fit = factory.build(sample)

# %%
factory.getBandwidth()

# %%
# Create a Funky distribution
corr = ot.CorrelationMatrix(3)
corr[0, 1] = 0.2
corr[1, 2] = -0.3
copula = ot.NormalCopula(corr)
x1 = ot.Normal(-1., 1)
x2 = ot.Normal(2, 1)
x3 = ot.Normal(-2, 1)
x_funk = ot.ComposedDistribution([x1, x2, x3], copula)

# Create a Punk distribution
x1 = ot.Normal(1., 1)
x2 = ot.Normal(-2, 1)
x3 = ot.Normal(3, 1)
x_punk = ot.ComposedDistribution([x1, x2, x3], copula)

# Merge the distributions
distribution = ot.Mixture([x_funk, x_punk], [0.5, 1.])

# Sample from the mixture
n = 500
sample = distribution.getSample(n)

myGraph = ot.Graph('Sample n=%d' % (n), ' ', ' ', True, '')
myPairs = ot.Pairs(sample, 'Pairs', sample.getDescription(), 'blue', 'bullet')
myGraph.add(myPairs)
View(myGraph)

sample.exportToCSVFile("gauss-mixture-3D.csv")
Example #20
def run_ImportanceSampling(
    event,
    pstar,
    sd=1.0,
    coefVar=0.05,
    outerSampling=1000,
    blockSize=10,
    seed=1234,
    verbose=False,
    failure_domain=None,
):
    """
    Run an importance sampling simulation.

    Parameters
    ----------
    event : openturns.Event
        The failure event.
    pstar : list of points
        Design points in the standard space where the instrumental
        distribution is centered.
    sd : positive float
        The standard deviation of the instrumental distribution.
    coefVar : float
        The target coefficient of variation.
    outerSampling : int
        The maximum number of outer iterations.
        Total number of evaluations = outerSampling x blockSize.
    blockSize : int
        The number of samples sent for simultaneous evaluation.
    seed : int
        Seed for the openturns random generator.
    verbose : bool
        Enable or not the display of the result.
    failure_domain : string
        Type of failure domain form: either 'union' or 'intersection'. Only
        needed if the event is a list.
    """

    # case with the limit state defined as an intersection
    # or a union of the event
    if type(event) is list:
        n_event = len(event)
        antecedent = event[0].getAntecedent()

        if failure_domain == "union":

            def function_union(X):
                sample = ot.NumericalSample(X.getSize(), n_event)
                for i in range(n_event):
                    sample[:, i] = event[i].getFunction()(X)

                sample = np.array(sample)
                for i in range(n_event):
                    if (event[i].getOperator().getImplementation(
                    ).getClassName() == "Less" or event[i].getOperator(
                    ).getImplementation().getClassName() == "LessOrEqual"):
                        sample[:, i] = sample[:, i] < event[i].getThreshold()
                    if (event[i].getOperator().getImplementation(
                    ).getClassName() == "Greater" or event[i].getOperator(
                    ).getImplementation().getClassName() == "GreaterOrEqual"):
                        sample[:, i] = sample[:, i] >= event[i].getThreshold()
                return np.atleast_2d(sample.sum(axis=1)).T

            model = ot.PythonFunction(
                event[0].getFunction().getInputDimension(),
                event[0].getFunction().getOutputDimension(),
                func_sample=function_union,
            )
            output = ot.RandomVector(model, antecedent)
            new_event = ot.ThresholdEvent(output, ot.Greater(), 0.0)

        elif failure_domain == "intersection":

            def function_intersection(X):
                sample = ot.NumericalSample(X.getSize(), n_event)
                for i in range(n_event):
                    sample[:, i] = event[i].getFunction()(X)

                sample = np.array(sample)
                for i in range(n_event):
                    if (event[i].getOperator().getImplementation(
                    ).getClassName() == "Less" or event[i].getOperator(
                    ).getImplementation().getClassName() == "LessOrEqual"):
                        sample[:, i] = sample[:, i] < event[i].getThreshold()
                    if (event[i].getOperator().getImplementation(
                    ).getClassName() == "Greater" or event[i].getOperator(
                    ).getImplementation().getClassName() == "GreaterOrEqual"):
                        sample[:, i] = sample[:, i] >= event[i].getThreshold()
                return np.atleast_2d(sample.prod(axis=1)).T

            model = ot.PythonFunction(
                event[0].getFunction().getInputDimension(),
                event[0].getFunction().getOutputDimension(),
                func_sample=function_intersection,
            )
            output = ot.RandomVector(model, antecedent)
            new_event = ot.ThresholdEvent(output, ot.Greater(), 0.0)
    else:
        model = event.getFunction()
        new_event = event

    # Initialize the random generator
    ot.RandomGenerator.SetSeed(seed)

    dim = model.getInputDimension()
    pstar = np.atleast_2d(pstar)
    nPoint = pstar.shape[0]

    stdev = [sd] * dim
    corr = ot.IdentityMatrix(dim)
    if nPoint > 1:
        distribution_list = list()
        for point in pstar:
            distribution_list.append(ot.Normal(point, stdev, corr))
        instrumental_distribution = ot.Mixture(distribution_list)
    elif nPoint == 1:
        instrumental_distribution = ot.Normal(pstar[0], stdev, corr)

    # Run importance sampling simulation
    experiment = ot.ImportanceSamplingExperiment(instrumental_distribution)
    simulation = ot.ProbabilitySimulationAlgorithm(ot.StandardEvent(new_event),
                                                   experiment)
    simulation.setMaximumOuterSampling(outerSampling)
    simulation.setBlockSize(blockSize)
    simulation.setMaximumCoefficientOfVariation(coefVar)

    # try:
    simulation.run()
    # except Exception as e:
    #     dump_cache(model, 'Cache/physicalModelMathFunction')
    #     raise e

    result = simulation.getResult()

    # Gather the results in a table (pandas.DataFrame.append is deprecated,
    # so the frame is built in a single call)
    dfResult = pd.DataFrame(
        [
            result.getProbabilityEstimate(),
            result.getCoefficientOfVariation(),
            result.getConfidenceLength(),
            result.getOuterSampling() * result.getBlockSize(),
        ],
        index=[
            "Probability of failure",
            "Coefficient of variation",
            "95 % Confidence length",
            "Number of calls",
        ],
    )
    dfResult = dfResult.reset_index()
    dfResult.columns = ["", "Results - Importance Sampling"]

    if verbose:
        print(dfResult, "\n")

    return simulation
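
# A hypothetical usage sketch (illustrative event; in practice `pstar` would
# come from a preliminary FORM analysis; assumes `import openturns as ot`):
X = ot.RandomVector(ot.Normal(2))
g = ot.SymbolicFunction(["x1", "x2"], ["x1 + x2 - 4"])
Y = ot.CompositeRandomVector(g, X)
event = ot.ThresholdEvent(Y, ot.Less(), 0.0)
simulation = run_ImportanceSampling(event, pstar=[[2.0, 2.0]], verbose=True)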
Example #21
import sys
import openturns.testing as ott
import math as m

ot.TESTPREAMBLE()

ot.ResourceMap.SetAsBool('Distribution-MinimumVolumeLevelSetBySampling', True)
ot.ResourceMap.SetAsUnsignedInteger(
    'Distribution-MinimumVolumeLevelSetSamplingSize', 500)

# 2-d test
dists = [
    ot.Normal([-1.0, 2.0], [1.0] * 2, ot.CorrelationMatrix(2)),
    ot.Normal([1.0, -2.0], [1.5] * 2, ot.CorrelationMatrix(2))
]
mixture = ot.Mixture(dists)

# 3-d test
R1 = ot.CovarianceMatrix(3)
R1[2, 1] = -0.25
R2 = ot.CovarianceMatrix(3)
R2[1, 0] = 0.5
R2[2, 1] = -0.3
R2[0, 0] = 1.3
print(R2)
dists = [ot.Normal([1.0, -2.0, 3.0], R1), ot.Normal([-1.0, 2.0, -2.0], R2)]
mixture = ot.Mixture(dists, [2.0 / 3.0, 1.0 / 3.0])

sample = mixture.getSample(1000)
distribution = ot.KernelSmoothing().build(sample)
algo = ot.MinimumVolumeClassifier(distribution, 0.8)
Example #22
    return graph


# %%
# The `computeMinimumVolumeInterval` returns an `Interval`.
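
# %%
# For instance (sketch; `n` and `alpha` are defined earlier in the example):
interval = n.computeMinimumVolumeInterval(alpha)
print(interval)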

# %%
graph = drawPDFAndInterval1D(n, interval, alpha)
view = viewer.View(graph)

# %%
# With a Mixture, minimum volume LevelSet
# ---------------------------------------

# %%
m = ot.Mixture([ot.Normal(-5., 1.), ot.Normal(5., 1.)], [0.2, 0.8])

# %%
graph = m.drawPDF()
view = viewer.View(graph)

# %%
alpha = 0.9
levelSet, threshold = m.computeMinimumVolumeLevelSetWithThreshold(alpha)
threshold

# %%
# The interesting point is that a `LevelSet` may be non-contiguous: in the current mixture example, the level set is not an interval.

# %%
graph = drawLevelSet1D(m, levelSet, alpha, threshold, 1000)
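
# %%
# A quick check of the non-contiguity (sketch): points near each mode lie
# inside the level set, while the region between the modes lies outside.
print(levelSet.contains([-5.0]), levelSet.contains([0.0]), levelSet.contains([5.0]))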
from __future__ import print_function
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
import math as m

Id = ot.IdentityMatrix(2)
atoms = [
    ot.Normal([1.0, 2.0], [0.5, 0.8], Id),
    ot.Normal([1.0, -2.0], [0.9, 0.8], Id),
    ot.Normal([-1.0, 0.0], [0.5, 0.6], Id)
]
weights = [0.3, 0.3, 0.4]
mixture = ot.Mixture(atoms, weights)
data = mixture.getSample(1000)
classifier = ot.MixtureClassifier(mixture)
graph = mixture.drawPDF(data.getMin(), data.getMax())
graph.setLegendPosition("")
graph.setTitle("MixtureClassifier example")
classes = classifier.classify(data)
palette = ot.Drawable.BuildDefaultPalette(len(atoms))
symbols = ot.Drawable.GetValidPointStyles()
for i in range(classes.getSize()):
    index = classes[i]
    graph.add(
        ot.Cloud([data[i]], palette[index % len(palette)],
                 symbols[index % len(symbols)]))

fig = plt.figure(figsize=(4, 4))
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
def CostFunction(func, p, m, lower, upper, N, mode, threshold, design, MinMax):
    '''
    Return the probability of failure corresponding to the sequence of
    canonical moments, in the general case where the input distribution can
    be continuous. Should be used with the NoisyDE solver.
    '''
    dim = len(lower)
    # Split p into one block of coefficients per variable
    if len(m) == dim:
        pp = []
        t = 0
        for i in range(dim):
            pp.append(p[t:t + len(m[i]) + 1])
            t = t + len(m[i]) + 1
    else:
        raise ValueError('size of moment vector does not match the dimension')
    Z = [[]] * dim
    Wgt = [[]] * dim
    NewMom = [[]] * dim
    m_copy = m.copy()
    for i in range(dim):
        if mode[i] is not None:
            m_copy[i] = np.append([1], m_copy[i])
            NewMom[i] = [
                (j + 1) * m_copy[i][j] - (j) * mode[i] * m_copy[i][j - 1]
                for j in range(1, len(m_copy[i]))
            ]
            Z[i], Wgt[i] = Canonical_to_Position(
                [lower[i]], [upper[i]],
                QD_Algorithm(
                    Affine_Transformation(lower[i], upper[i], NewMom[i])) +
                pp[i])
        else:
            Z[i], Wgt[i] = Canonical_to_Position(
                [lower[i]], [upper[i]],
                QD_Algorithm(
                    Affine_Transformation(lower[i], upper[i], m_copy[i])) +
                pp[i])

    if not any(isinstance(Z[i], int) for i in range(len(Z))):
        if design == 'MC':
            PERT = []
            for i in range(dim):
                if mode[i] is not None:
                    U = []
                    for j in range(len(m[i]) + 1):
                        U.append(
                            ot.Uniform(float(min(mode[i], Z[i][j])),
                                       float(max(mode[i], Z[i][j]))))
                    PERT.append(ot.Mixture(U, Wgt[i]))
                else:
                    U = []
                    for j in range(len(m[i]) + 1):
                        U.append(ot.Dirac(Z[i][j]))
                    PERT.append(ot.Mixture(U, Wgt[i]))
            DIST = ot.ComposedDistribution(PERT)
            Sample = DIST.getSample(N)
            return MinMax * sum(func(Sample) <= threshold) / N

        elif design == 'LHS':
            Sample = LHSdesign(Z, Wgt, mode, N)
            return MinMax * sum(func(Sample) <= threshold) / N
    else:
        return 1