Example #1
def getFirstOrderAsymptoticDistribution(self):
    # Build the asymptotic (normal) distribution of each aggregated
    # first-order Sobol' index estimate from its estimated variance.
    indicesFO = self.getAggregatedFirstOrderIndices()
    varianceFO, varianceTO = self.computeVariance()
    foDist = ot.DistributionCollection(self.input_dim)
    for p in range(self.input_dim):
        foDist[p] = ot.Normal(indicesFO[p], np.sqrt(varianceFO[p]))
    return foDist
Example #2
def setInputRandomVector(inList):
    # set the input random vector X from the list of marginal distributions
    myCollection = ot.DistributionCollection(len(inList))
    for index in range(len(inList)):
        myCollection[index] = ot.Distribution(inList[index])
    myDistribution = ot.ComposedDistribution(myCollection)
    VectX = ot.RandomVector(ot.Distribution(myDistribution))
    return myDistribution, VectX
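A minimal usage sketch (the two marginals below are illustrative, not from the source):

import openturns as ot
myDistribution, VectX = setInputRandomVector(
    [ot.Normal(0.0, 1.0), ot.Uniform(0.0, 1.0)])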
Example #3
    def __init__(self, monteCarloResult, distribution, deltas):
        # the monte carlo result must have its underlying function with
        # the history enabled because the failure sample is obtained using it
        self._monteCarloResult = monteCarloResult
        self.function = ot.MemoizeFunction(
            self._monteCarloResult.getEvent().getFunction())
        if self.function.getOutputHistory().getSize() == 0:
            raise AttributeError("The performance function of the Monte Carlo "
                                 "simulation result must be a MemoizeFunction "
                                 "with its history enabled.")

        # the original distribution
        if distribution.hasIndependentCopula():
            self._distribution = distribution
        else:
            raise Exception(
                "The distribution must have an independent copula.")
        self._dim = self._distribution.getDimension()

        # the 1d or 2d sequence of deltas
        self._originalDelta = np.vstack(np.array(deltas))
        self._deltaValues = self._originalDelta.copy()
        self._deltaSize = self._deltaValues.shape[0]

        if self._deltaValues.shape[1] != 1 and \
                self._deltaValues.shape[1] != self._dim:
            raise AttributeError('The deltas parameter must be a 1d sequence '
                                 'of float or a 2d sequence of float with '
                                 'dimension equal to {}.'.format(self._dim))

        # check if the delta values have only one dimension -> copy the columns
        if self._deltaValues.shape[1] == 1:
            self._deltaValues = np.ones(
                (self._deltaValues.shape[0], self._dim)) * self._deltaValues

        # initialize array result
        # rows : delta
        # columns : marginal
        self._pfdelta = np.zeros((self._deltaSize, self._dim))
        self._varPfdelta = np.zeros((self._deltaSize, self._dim))
        self._indices = np.zeros((self._deltaSize, self._dim))
        # use a loop so each entry gets its own DistributionCollection
        # (avoids shared references to a single collection object)
        self._estimatorDist = [
            ot.DistributionCollection(self._dim)
            for i in range(self._deltaSize)
        ]

        # set up the Gauss-Kronrod quadrature algorithm
        self._gaussKronrod = ot.GaussKronrod(
            50, 1e-5, ot.GaussKronrodRule(ot.GaussKronrodRule.G7K15))
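A sketch of how a compatible `monteCarloResult` could be produced (the function, event and simulation objects are illustrative, and the pre-1.13 `Event`/`MonteCarlo` API era is assumed to match the snippet):

g = ot.MemoizeFunction(ot.SymbolicFunction(['x1', 'x2'], ['x1 + x2']))
X = ot.RandomVector(ot.ComposedDistribution([ot.Normal(), ot.Normal()]))
event = ot.Event(ot.RandomVector(g, X), ot.Greater(), 3.0)
algo = ot.MonteCarlo(event)  # pre-1.13 API, as in the snippet
algo.run()
monteCarloResult = algo.getResult()  # history kept by the MemoizeFunction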
Example #4
def GenerateExperiencePlan(paras_range=paras_ris_range, n_sample=50000):
    """
    """
    for k, v in paras_range.items():
        assert v[0] < v[1], 'ERROR of v[0]>=v[1] for ranges {}'.format(k)

    # the number of parameters must match the dimension of the ranges
    len_actual_parameters = len(paras_range)
    # Specify the input random vector.
    # (one probability distribution instance per parameter)
    myCollection = ot.DistributionCollection(len_actual_parameters)

    for i, p_name in enumerate(paras_range):
        distribution = ot.Uniform(paras_range[p_name][0],
                                  paras_range[p_name][1])
        myCollection[i] = ot.Distribution(distribution)
    myDistribution = ot.ComposedDistribution(myCollection)
    vectX = ot.RandomVector(ot.Distribution(myDistribution))

    # Sample the input random vector.
    Xsample = GenerateSample(vectX, n_sample, method='QMC')
    xsample = np.array(Xsample)
    return xsample
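`GenerateSample` is a project helper that is not shown here; a minimal QMC sketch of it (an assumption, using a Sobol' low-discrepancy sequence) could be:

def GenerateSample(vectX, n_sample, method='QMC'):
    distribution = vectX.getDistribution()
    if method == 'QMC':
        # quasi-Monte Carlo sampling via a Sobol' sequence
        experiment = ot.LowDiscrepancyExperiment(
            ot.SobolSequence(distribution.getDimension()), distribution,
            n_sample)
        return experiment.generate()
    return distribution.getSample(n_sample)  # plain Monte Carlo fallback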
Example #5
distribution_J3 = ot.Normal(mu_J, sigma_J)
distribution_J4 = ot.Normal(mu_J, sigma_J)

# Preload:
Delta_P = 0.025
mu_P = 0.045
sigma_P = Delta_P/6

distribution_P1 = ot.Normal(mu_P, sigma_P)
distribution_P2 = ot.Normal(mu_P, sigma_P)
distribution_P3 = ot.Normal(mu_P, sigma_P)
distribution_P4 = ot.Normal(mu_P, sigma_P)


# Create the collection of input distributions
myCollection = ot.DistributionCollection(dim)
myCollection[0] = distribution_Dx1
myCollection[1] = distribution_Dx2
myCollection[2] = distribution_Dx3
myCollection[3] = distribution_Dx4
myCollection[4] = distribution_Dy1
myCollection[5] = distribution_Dy2
myCollection[6] = distribution_Dy3
myCollection[7] = distribution_Dy4
myCollection[8] = distribution_J1
myCollection[9] = distribution_J2
myCollection[10] = distribution_J3
myCollection[11] = distribution_J4
myCollection[12] = distribution_P1
myCollection[13] = distribution_P2
myCollection[14] = distribution_P3
Example #6
ot.TESTPREAMBLE()

# Instantiate one distribution object
dimension = 3
meanPoint = ot.Point(dimension, 1.0)
meanPoint[0] = 0.5
meanPoint[1] = -0.5
sigma = ot.Point(dimension, 1.0)
sigma[0] = 2.0
sigma[1] = 3.0
R = ot.CorrelationMatrix(dimension)
for i in range(1, dimension):
    R[i, i - 1] = 0.5

# Create a collection of distributions
aCollection = ot.DistributionCollection()

aCollection.add(ot.Normal(meanPoint, sigma, R))
meanPoint += ot.Point(meanPoint.getDimension(), 1.0)
aCollection.add(ot.Normal(meanPoint, sigma, R))
meanPoint += ot.Point(meanPoint.getDimension(), 1.0)
aCollection.add(ot.Normal(meanPoint, sigma, R))

# Instantiate one distribution object
distribution = ot.Mixture(aCollection, ot.Point(aCollection.getSize(), 2.0))
print("Distribution ", repr(distribution))
print("Weights = ", repr(distribution.getWeights()))
weights = distribution.getWeights()
weights[0] = 2.0 * weights[0]
distribution.setWeights(weights)
print("After update, new weights = ", repr(distribution.getWeights()))
Example #7
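The snippet starts mid-function; a plausible prologue, reconstructed from the classic OpenTURNS flood model (the imports, `dim`, and the constants `L`, `B`, `Hd` are assumptions), is:

import math as m
import openturns as ot

dim = 4  # Q, Ks, Zv, Zm

def flood_model(X):
    Q, Ks, Zv, Zm = X
    L = 5000.0  # river length (m) -- assumed value
    B = 300.0   # river width (m) -- assumed value
    Hd = 3.0    # dike height (m) -- assumed value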
    Zb = 55.5  # m
    S = Zv + (Q / (Ks * B * m.sqrt((Zm - Zv) / L)))**(3. / 5) - (Hd + Zb)
    return [S]


function = ot.PythonFunction(dim, 1, flood_model)

Q_law = ot.TruncatedDistribution(
    ot.Gumbel(1. / 558., 1013., ot.Gumbel.ALPHABETA), 0.,
    ot.TruncatedDistribution.LOWER)
# alpha=1/b, beta=a | you can use Gumbel(a, b, Gumbel.AB) starting from OT 1.2
Ks_law = ot.TruncatedDistribution(ot.Normal(30.0, 7.5), 0.,
                                  ot.TruncatedDistribution.LOWER)
Zv_law = ot.Triangular(49., 50., 51.)
Zm_law = ot.Triangular(54., 55., 56.)
coll = ot.DistributionCollection([Q_law, Ks_law, Zv_law, Zm_law])
distribution = ot.ComposedDistribution(coll)

x = list(map(lambda dist: dist.computeQuantile(0.5)[0], coll))
fx = function(x)

for k in [0.0, 2.0, 5.0, 8.][0:1]:
    randomVector = ot.RandomVector(distribution)
    composite = ot.RandomVector(function, randomVector)

    print('--------------------')
    print('model flood S <', k, 'gamma=', end=' ')
    print('f(', ot.NumericalPoint(x), ')=', fx)

    event = ot.Event(composite, ot.Greater(), k)
    for n in [100, 1000, 5000][1:2]:
Example #8
# Configure the optimization solver
# ---------------------------------

# %%
# The following example checks the robustness of the kriging optimization with
# respect to the likelihood maximization performed when estimating the
# covariance model parameters. We use a `MultiStart` algorithm to avoid being
# trapped in a local minimum. Furthermore, we generate the design of
# experiments with a `LHSExperiment`, which helps the points fill the space.

# %%
sampleSize_train = 10
X_train = myDistribution.getSample(sampleSize_train)
Y_train = model(X_train)

# %%
# First, we create a multivariate distribution, based on independent `Uniform` marginals which have the bounds required by the covariance model.

# %%
distributions = ot.DistributionCollection()
for i in range(dim):
    distributions.add(ot.Uniform(lbounds[i], ubounds[i]))
boundedDistribution = ot.ComposedDistribution(distributions)

# %%
# We first generate a Latin Hypercube Sampling (LHS) design made of 25 points in the sample space. This LHS is optimized so as to fill the space.

# %%
K = 25 # design size
LHS = ot.LHSExperiment(boundedDistribution, K)
LHS.setAlwaysShuffle(True)
SA_profile = ot.GeometricProfile(10., 0.95, 20000)
LHS_optimization_algo = ot.SimulatedAnnealingLHS(LHS, SA_profile, ot.SpaceFillingC2())
LHS_optimization_algo.generate()
LHS_design = LHS_optimization_algo.getResult()
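The truncated snippet does not show the `MultiStart` step it announces; a minimal sketch, assuming a `KrigingAlgorithm` instance named `algo` built from `X_train`/`Y_train`, could be:

# Plug the optimized LHS points into a MultiStart solver
# (`algo` is an assumed ot.KrigingAlgorithm instance).
solver = ot.MultiStart(ot.TNC(), LHS_design)
algo.setOptimizationAlgorithm(solver)
algo.run()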
Example #9
ot.TESTPREAMBLE()

# Instantiate one distribution object
dim = 2
meanPoint = [0.5, -0.5]
sigma = [2.0, 3.0]
R = ot.CorrelationMatrix(dim)
for i in range(1, dim):
    R[i, i - 1] = 0.5

distribution = ot.Normal(meanPoint, sigma, R)
discretization = 100
kernel = ot.KernelSmoothing()
sample = distribution.getSample(discretization)
kernels = ot.DistributionCollection(0)
kernels.add(ot.Normal())
kernels.add(ot.Epanechnikov())
kernels.add(ot.Uniform())
kernels.add(ot.Triangular())
kernels.add(ot.Logistic())
kernels.add(ot.Beta(2.0, 2.0, -1.0, 1.0))
kernels.add(ot.Beta(3.0, 3.0, -1.0, 1.0))
meanExact = distribution.getMean()
covarianceExact = distribution.getCovariance()
for i in range(kernels.getSize()):
    kernel = kernels[i]
    print("kernel=", kernel.getName())
    smoother = ot.KernelSmoothing(kernel)
    smoothed = smoother.build(sample)
    bw = smoother.getBandwidth()
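    # Sketch of a natural continuation (the snippet is truncated here):
    # compare the smoothed moments with the exact ones.
    print("bandwidth=", bw)
    print("mean (smoothed)=", smoothed.getMean(), "(exact)=", meanExact)
    print("cov (smoothed)=", smoothed.getCovariance(),
          "(exact)=", covarianceExact)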
Example #10
                    cstride=1,
                    cmap=pl.matplotlib.cm.jet)
    ax.set_xlabel('$x_1$')
    ax.set_ylabel('$x_2$')
    ax.set_zlabel('$\\varphi_i(\\mathbf{x})$')
    pl.savefig('2D_identification_eigensolution_%d.png' % i)
    pl.close()

# Calculation of the KL coefficients by functional projection using
# Gauss-Legendre quadrature
xi = estimated_random_field.compute_coefficients(sample_paths)

# Statistical inference of the KL coefficients' distribution
kernel_smoothing = ot.KernelSmoothing(ot.Normal())
xi_marginal_distributions = ot.DistributionCollection([
    kernel_smoothing.build(xi[:, i][:, np.newaxis])
    for i in range(truncation_order)
])
try:
    xi_copula = ot.NormalCopulaFactory().build(xi)
except RuntimeError:
    print('ERR: The normal copula correlation matrix built from the given\n' +
          'Spearman correlation matrix is not positive definite.\n' +
          'This would require expert judgement on the correlation\n' +
          'coefficients significance (using e.g. Spearman test).\n' +
          'Assuming an independent copula in the sequel...')
    xi_copula = ot.IndependentCopula(truncation_order)
xi_estimated_distribution = ot.ComposedDistribution(xi_marginal_distributions,
                                                    xi_copula)

# Matrix plot of the empirical KL coefficients & their estimated distribution
matrix_plot(xi,
Example #11
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
ot.RandomGenerator.SetSeed(0)

# Generate sample with the given plane
distribution = ot.ComposedDistribution(
    ot.DistributionCollection(
        [ot.Exponential(), ot.Triangular(-1.0, -0.5, 1.0)]))
marginalDegrees = ot.Indices([3, 6])
myPlane = ot.GaussProductExperiment(ot.Distribution(distribution),
                                    marginalDegrees)

sample = myPlane.generate()

# Create an empty graph
graph = ot.Graph("", "x1", "x2", True, "")

# Create the cloud
cloud = ot.Cloud(sample, "blue", "fsquare", "")

# Then, draw it
graph.add(cloud)
fig = plt.figure(figsize=(4, 4))
plt.suptitle("Gauss product experiment")
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
View(graph, figure=fig, axes=[axis], add_legend=False)
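The nodes of a Gauss product rule come with quadrature weights as well; in recent OpenTURNS versions they can be retrieved together (a sketch):

sample, weights = myPlane.generateWithWeights()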
Example #12
#! /usr/bin/env python

import openturns as ot

ot.TESTPREAMBLE()

# Instantiate one distribution object
dimension = 3
meanPoint = ot.Point([0.5, -0.5, 1])
sigma = [2, 3, 1]

sample = ot.Sample(0, dimension)
# Create a collection of distributions
aCollection = ot.DistributionCollection()

aCollection.add(ot.Normal(meanPoint, sigma, ot.IdentityMatrix(dimension)))
sample.add(meanPoint)
meanPoint += [1.0] * dimension
aCollection.add(ot.Normal(meanPoint, sigma, ot.IdentityMatrix(dimension)))
sample.add(meanPoint)
meanPoint += [1.0] * dimension
aCollection.add(ot.Normal(meanPoint, sigma, ot.IdentityMatrix(dimension)))
sample.add(meanPoint)

# Instantiate one distribution object
distribution = ot.KernelMixture(ot.Normal(), sigma, sample)
print("Distribution ", repr(distribution))
print("Distribution ", distribution)
distributionRef = ot.Mixture(aCollection)

# Is this distribution elliptical ?
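# The test is truncated here; the announced check is presumably (a sketch):
print("Elliptical = ", distribution.isElliptical())
print("Elliptical (ref) = ", distributionRef.isElliptical())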
Example #13
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot
from math import fabs
import openturns.testing as ott

ot.TESTPREAMBLE()
ot.RandomGenerator.SetSeed(0)

continuousDistributionCollection = ot.DistributionCollection()
discreteDistributionCollection = ot.DistributionCollection()
distributionCollection = ot.DistributionCollection()

beta = ot.Beta(2.0, 1.0, 0.0, 1.0)
distributionCollection.add(beta)
continuousDistributionCollection.add(beta)

gamma = ot.Gamma(1.0, 2.0, 3.0)
distributionCollection.add(gamma)
continuousDistributionCollection.add(gamma)

gumbel = ot.Gumbel(1.0, 2.0)
distributionCollection.add(gumbel)
continuousDistributionCollection.add(gumbel)

lognormal = ot.LogNormal(1.0, 1.0, 2.0)
distributionCollection.add(lognormal)
continuousDistributionCollection.add(lognormal)

logistic = ot.Logistic(1.0, 1.0)
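# The listing is truncated; the established pattern presumably continues:
distributionCollection.add(logistic)
continuousDistributionCollection.add(logistic)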
Example #14
    print("kurtosis      =", kurtosis)
    print("kurtosis (ref)=", distributionReference.getKurtosis())
    covariance = distribution.getCovariance()
    print("covariance      =", covariance)
    print("covariance (ref)=", distributionReference.getCovariance())
    parameters = distribution.getParametersCollection()
    print("parameters=", parameters)
    print("Standard representative=", distribution.getStandardRepresentative())
    print("blockMin=", distribution.getBlockMin())
    print("blockMax=", distribution.getBlockMax())
    print("maxSize=", distribution.getMaxSize())
    print("alpha=", distribution.getAlpha())
    print("beta=", distribution.getBeta())
# Tests of the simplification mechanism
weights = ot.Point(0)
coll = ot.DistributionCollection(0)
coll.add(ot.Dirac(0.5))
weights.add(1.0)
coll.add(ot.Normal(1.0, 2.0))
weights.add(2.0)
coll.add(ot.Normal(2.0, 1.0))
weights.add(-3.0)
coll.add(ot.Uniform(-2.0, 2.0))
weights.add(-1.0)
coll.add(ot.Uniform(2.0, 4.0))
weights.add(2.0)
coll.add(ot.Exponential(2.0, -3.0))
weights.add(1.5)
rm = ot.RandomMixture(coll, weights)
coll.add(rm)
weights.add(-2.5)
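# Sketch of the nested mixture the test presumably builds next
# from the extended collection and weights:
rm2 = ot.RandomMixture(coll, weights)
print("rm2=", rm2)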
Example #15
        sys.exit()
    return model

#§ 2. Random vector definition
# Create the marginal distributions of the input random vector
dict_distribution = {
    "PARE0":ot.Normal(7032392.1, 20700.0),
    "QARE0":ot.Normal(2142.972222, 8.5),
    "TARE0":ot.Normal(500.2497543, 3.0),
    "QGSS0":ot.Normal(183.713226, 0.75),
    "QGRE0":ot.Normal(1922.331218, 8.5),
    "PGCT0":ot.Normal(6.54e6, 6.40e4),
}

# Create the input probability distribution
collectionMarginals = ot.DistributionCollection()
for distribution in list(dict_distribution.values()):
    collectionMarginals.add(ot.Distribution(distribution))

inputDistribution = ot.ComposedDistribution(collectionMarginals)

# Give a description of each component of the input distribution
inputDistribution.setDescription(list(dict_distribution.keys()))
inputRandomVector = ot.RandomVector(inputDistribution)

#§
def run_demo(with_initialization_script, seed=None, n_simulation=None):
    """Run the demonstration

    Parameters
    ----------
Example #16
# Posterior for mu: E(mu|y) = w*y_mean + (1-w)*mu0, and Var(mu|y) = w*sigma^2/n
# => weighted average of the prior mean and the sample mean,
# with w = n*sigma0^2 / (n*sigma0^2 + sigma^2)
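# A quick numeric check of the formula above (illustrative values, not from
# the example): with n = 10, y_mean = 30.8, mu0 = 25.0, sigma0 = 1.0, sigma = 1.2,
#   w = 10*1.0 / (10*1.0 + 1.44) ~= 0.874
#   E(mu|y)   ~= 0.874*30.8 + 0.126*25.0 ~= 30.07
#   Var(mu|y) ~= 0.874*1.44/10 ~= 0.126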

# Log::Show(Log::ALL)
# observations
size = 10
realDist = ot.Normal(31., 1.2)

data = realDist.getSample(size)

# calibration parameters
calibrationColl = [ot.CalibrationStrategy()] * 2

# proposal distribution
proposalColl = ot.DistributionCollection()
mean_proposal = ot.Uniform(-2.0, 2.0)
std_proposal = ot.Uniform(-2.0, 2.0)
proposalColl.add(mean_proposal)
proposalColl.add(std_proposal)

# prior distribution
mu0 = 25.

sigma0s = [0.1, 1.0]
# sigma0s.append(2.0)

# play with the variance of the prior:
# if the prior variance is low (the information concerning the mu parameter
# is strong), then the posterior mean will be close to the prior mean;
# if it is large, the posterior distribution is essentially determined by
# the data alone