示例#1
0
    def __init__(self):
        """Set up the simply-supported beam model and its input distributions."""
        self.dim = 6

        # Symbolic model: deflection g1 and end rotations g2/g3 of a beam
        # under a point load F at position a (I is the tube inertia).
        formula = ("var I:=pi_*(De^4-di^4)/32; var b:=L-a; "
                   "g1:=-F*a^2*(L-a)^2/(3*E*L*I); "
                   "g2:=-F*b*(L^2-b^2)/(6*E*L*I); "
                   "g3:=F*a*(L^2-a^2)/(6*E*L*I)")
        self.model = ot.SymbolicFunction(
            ["F", "L", "a", "De", "di", "E"], ["g1", "g2", "g3"], formula)
        self.model.setOutputDescription(
            ["Deflection", "Left angle", "Right angle"])

        # Random inputs: applied force and Young modulus.
        self.XF = ot.Normal(1, 0.1)
        self.XF.setDescription(["Force"])
        self.XE = ot.Normal(200000, 2000)
        self.XE.setDescription(["Young Modulus"])

        # Deterministic inputs, modelled as Dirac distributions.
        self.XL = ot.Dirac(1.5)
        self.XL.setDescription(["Length"])
        self.Xa = ot.Dirac(1.0)
        self.Xa.setDescription(["Location"])
        self.XD = ot.Dirac(0.8)
        self.XD.setDescription(["External diameter"])
        self.Xd = ot.Dirac(0.1)
        self.Xd.setDescription(["Internal diameter"])

        # Joint input distribution, in the model's expected input order.
        self.inputDistribution = ot.ComposedDistribution(
            [self.XF, self.XL, self.Xa, self.XD, self.Xd, self.XE])
示例#2
0
    def _computePODSamplePerDefect(self, defect, detection, krigingResult,
                                   transformation, distribution,
                                   simulationSize, samplingSize):
        """
        Compute the POD sample for a defect size.

        Parameters
        ----------
        defect : float
            Defect size at which the POD is evaluated; the first marginal of
            ``distribution`` is replaced by a Dirac at this value.
        detection : float
            Detection threshold applied to the predicted signal.
        krigingResult
            Kriging metamodel result passed to ``_randomVectorSampling``.
        transformation : callable
            Mapping applied to the Monte Carlo input sample before prediction.
        distribution
            Joint input distribution; only its non-defect marginals are kept.
        simulationSize : int
            Number of metamodel realizations (rows of ``Y_sample``).
        samplingSize : int
            Monte Carlo sample size per realization (columns of ``Y_sample``).

        Returns
        -------
        POD_PG_sample
            Sample of size ``simulationSize * samplingSize`` drawn from the
            mixture of the per-realization POD estimator distributions.
        """

        dim = distribution.getDimension()
        # create a distribution whose first marginal is a Dirac at the defect
        # size; the remaining marginals are kept unchanged
        diracDist = [ot.Dirac(defect)]
        diracDist += [distribution.getMarginal(i + 1) for i in range(dim - 1)]
        distribution = ot.ComposedDistribution(diracDist)

        # create a sample for the Monte Carlo simulation and confidence interval
        MC_sample = distribution.getSample(samplingSize)
        # Kriging_RV = ot.KrigingRandomVector(krigingResult, MC_sample)
        # Y_sample = Kriging_RV.getSample(simulationSize)
        Y_sample = self._randomVectorSampling(krigingResult,
                                              transformation(MC_sample),
                                              simulationSize, samplingSize)

        # POD per realization: fraction of predictions above the threshold
        POD_MCPG_a = np.mean(Y_sample > detection, axis=1)
        # compute the variance of the MC simulation using TCL (CLT)
        VAR_TCL = np.array(POD_MCPG_a) * (
            1 - np.array(POD_MCPG_a)) / Y_sample.shape[1]
        # Create distribution of the POD estimator for all simulation
        POD_PG_dist = []
        for i in range(simulationSize):
            if VAR_TCL[i] > 0:
                POD_PG_dist += [ot.Normal(POD_MCPG_a[i], np.sqrt(VAR_TCL[i]))]
            else:
                # degenerate case: every prediction fell on the same side of
                # the threshold, so the POD estimator is a point mass (0 or 1)
                if POD_MCPG_a[i] < 1:
                    POD_PG_dist += [ot.Dirac([0.])]
                else:
                    POD_PG_dist += [ot.Dirac([1.])]
        POD_PG_alea = ot.Mixture(POD_PG_dist)
        # get a sample of these distributions
        POD_PG_sample = POD_PG_alea.getSample(simulationSize * samplingSize)

        return POD_PG_sample
示例#3
0
    def __init__(self):
        """Chaboche model setup: one random strain input and three
        deterministic material parameters (R, C, Gamma)."""
        self.dim = 4  # number of inputs

        # Random strain input.
        self.Strain = ot.Uniform(0.0, 0.07)
        self.Strain.setDescription(["Strain"])

        # Deterministic parameters: store the reference value as
        # self.unknown<Name> and the matching Dirac distribution as
        # self.<Name>.
        for name, value in (("R", 750.0e6), ("C", 2750.0e6), ("Gamma", 10.0)):
            setattr(self, "unknown" + name, value)
            marginal = ot.Dirac(value)
            marginal.setDescription([name])
            setattr(self, name, marginal)

        self.inputDistribution = ot.ComposedDistribution(
            [self.Strain, self.R, self.C, self.Gamma])

        self.model = ot.PythonFunction(4, 1, g)
# a- observations
nbObs = 100
inObs = ot.Uniform(0., 10.).getSample(nbObs)
inObs.setDescription(['x1'])

# observed outputs: model 'y0' evaluated with parameters (1.2, 1.) at the
# observed inputs, plus additive Gaussian noise
y0Noise = ot.Normal(0, 0.1).getSample(nbObs)
y0Sample = ot.ParametricFunction(symbolicModel.getFunction('y0'), [1, 2], [1.2, 1.])(inObs)

y0Obs = y0Sample + y0Noise
y0Obs.setDescription(['y0'])
obs = persalys.Observations("observations", symbolicModel, inObs, y0Obs)
myStudy.add(obs)

# b- calibrationAnalysis
calibration = persalys.CalibrationAnalysis('calibration', obs)
# calibrate x2 (prior: Dirac at 1.2); keep x3 fixed at 1.1
calibration.setCalibratedInputs(['x2'], ot.Dirac([1.2]), ['x3'], [1.1])
calibration.setMethodName('GaussianNonlinear')
sigma = 0.15
errorCovariance = ot.CovarianceMatrix(1)
errorCovariance[0, 0] = sigma**2
calibration.setErrorCovariance(errorCovariance)
calibration.setBootStrapSize(25)
calibration.setConfidenceIntervalLength(0.99)

# tune the optimization algorithm used by the non-linear calibration
optimAlgo = calibration.getOptimizationAlgorithm()
optimAlgo.setMaximumEvaluationNumber(50)
optimAlgo.setMaximumAbsoluteError(1e-6)
optimAlgo.setMaximumRelativeError(1e-6)
optimAlgo.setMaximumResidualError(1e-6)
optimAlgo.setMaximumConstraintError(1e-6)
calibration.setOptimizationAlgorithm(optimAlgo)
示例#5
0
        "a +  -1.0  * b +  1.0  * c",
        "a +  -0.6  * b +  0.36  * c",
        "a +  -0.2  * b +  0.04  * c",
        "a +  0.2  * b +  0.04  * c",
        "a +  0.6  * b +  0.36  * c",
        "a +  1.0  * b +  1.0  * c",
    ],
)
inputDimension = g.getInputDimension()
outputDimension = g.getOutputDimension()

# reference parameter values (named "true" — presumably used to generate
# the observations elsewhere in this script)
trueParameter = ot.Point([12.0, 7.0, -8])

parameterDimension = trueParameter.getDimension()

# each parameter is deterministic, represented by a Dirac distribution
Theta1 = ot.Dirac(trueParameter[0])
Theta2 = ot.Dirac(trueParameter[1])
Theta3 = ot.Dirac(trueParameter[2])

inputRandomVector = ot.ComposedDistribution([Theta1, Theta2, Theta3])

# starting point of the calibration, away from trueParameter
candidate = ot.Point([8.0, 9.0, -6.0])

calibratedIndices = [0, 1, 2]
model = ot.ParametricFunction(g, calibratedIndices, candidate)

# additive Gaussian observation noise: zero mean, small diagonal covariance
outputObservationNoiseSigma = 0.01
meanNoise = ot.Point(outputDimension)
covarianceNoise = ot.Point(outputDimension, outputObservationNoiseSigma)
R = ot.IdentityMatrix(outputDimension)
observationOutputNoise = ot.Normal(meanNoise, covarianceNoise, R)
示例#6
0
# flooding model wrapped as an OpenTURNS function; history kept for reuse
f = ot.PythonFunction(4, 1, functionCrue)
f.enableHistory()

# 2. Random vector definition
Q = ot.Gumbel(1. / 558., 1013.)
print(Q)
'''
Q = ot.Gumbel()
Q.setParameter(ot.GumbelAB()([1013., 558.]))
print(Q)
'''
# the flow rate cannot be negative: truncate the Gumbel at 0
Q = ot.TruncatedDistribution(Q, 0, inf)
unknownKs = 30.0
unknownZv = 50.0
unknownZm = 55.0
# deterministic parameters modelled as Dirac distributions
K_s = ot.Dirac(unknownKs)
Z_v = ot.Dirac(unknownZv)
Z_m = ot.Dirac(unknownZm)

# 3. View the PDF
Q.setDescription(["Q (m3/s)"])
K_s.setDescription(["Ks (m^(1/3)/s)"])
Z_v.setDescription(["Zv (m)"])
Z_m.setDescription(["Zm (m)"])

# 4. Create the joint distribution function
inputRandomVector = ot.ComposedDistribution([Q, K_s, Z_v, Z_m])

# 5. Create the Monte-Carlo algorithm
sampleSize = 100
inputSample = inputRandomVector.getSample(sampleSize)
示例#7
0
    print("kurtosis (ref)=", distributionReference.getKurtosis())
    covariance = distribution.getCovariance()
    print("covariance      =", covariance)
    print("covariance (ref)=", distributionReference.getCovariance())
    parameters = distribution.getParametersCollection()
    print("parameters=", parameters)
    print("Standard representative=", distribution.getStandardRepresentative())
    print("blockMin=", distribution.getBlockMin())
    print("blockMax=", distribution.getBlockMax())
    print("maxSize=", distribution.getMaxSize())
    print("alpha=", distribution.getAlpha())
    print("beta=", distribution.getBeta())
# Tests of the simplification mechanism: build a weighted RandomMixture
# from heterogeneous atoms (Dirac, Normal, Uniform, Exponential, ...)
weights = ot.Point(0)
coll = ot.DistributionCollection(0)
coll.add(ot.Dirac(0.5))
weights.add(1.0)
coll.add(ot.Normal(1.0, 2.0))
weights.add(2.0)
coll.add(ot.Normal(2.0, 1.0))
weights.add(-3.0)
coll.add(ot.Uniform(-2.0, 2.0))
weights.add(-1.0)
coll.add(ot.Uniform(2.0, 4.0))
weights.add(2.0)
coll.add(ot.Exponential(2.0, -3.0))
weights.add(1.5)
rm = ot.RandomMixture(coll, weights)
# nest the mixture itself as an atom of the growing collection
coll.add(rm)
weights.add(-2.5)
coll.add(ot.Gamma(3.0, 4.0, -2.0))
def CostFunction(func, p, m, lower, upper, N, mode, threshold, design, MinMax):
    '''
    Return the probability of failure corresponding to the sequence of
    canonical moments, in the general case where the input distribution can
    be continuous. Should be used with the NoisyDE solver.

    Parameters
    ----------
    func : callable
        Limit-state function evaluated on the generated sample.
    p : sequence of float
        Concatenated canonical-moment parameters; block i has
        len(m[i]) + 1 entries.
    m : list of sequences
        Moment constraints, one entry per input variable.
    lower, upper : sequences of float
        Bounds of each input variable (their length defines the dimension).
    N : int
        Sample size of the Monte Carlo / LHS design.
    mode : list
        Per-variable mode, or None when the variable has no mode constraint;
        a mode triggers the unimodal (uniform-mixture) reconstruction.
    threshold : float
        Failure is defined as func(x) <= threshold.
    design : str
        'MC' for Monte Carlo or 'LHS' for Latin hypercube sampling
        (any other value returns None).
    MinMax : int
        +1 / -1 factor turning the probability into a min or max objective.
    '''
    dim = len(lower)
    # Split the flat parameter vector p into one block per input variable.
    if len(m) == dim:
        pp = []
        t = 0
        for i in range(dim):
            pp.append(p[t:t + len(m[i]) + 1])
            t = t + len(m[i]) + 1
    else:
        # The original code only printed here and then crashed later on the
        # undefined 'pp'; fail fast with an explicit error instead.
        raise ValueError('error size of moment vector')
    Z = [[]] * dim
    Wgt = [[]] * dim
    NewMom = [[]] * dim
    m_copy = m.copy()
    for i in range(dim):
        if mode[i] is not None:
            # Unimodal case: transform the moment sequence around the mode
            # before running the quotient-difference algorithm.
            m_copy[i] = np.append([1], m_copy[i])
            NewMom[i] = [
                (j + 1) * m_copy[i][j] - (j) * mode[i] * m_copy[i][j - 1]
                for j in range(1, len(m_copy[i]))
            ]
            Z[i], Wgt[i] = Canonical_to_Position(
                [lower[i]], [upper[i]],
                QD_Algorithm(
                    Affine_Transformation(lower[i], upper[i], NewMom[i])) +
                pp[i])
        else:
            Z[i], Wgt[i] = Canonical_to_Position(
                [lower[i]], [upper[i]],
                QD_Algorithm(
                    Affine_Transformation(lower[i], upper[i], m_copy[i])) +
                pp[i])

    # Canonical_to_Position signals failure with an int flag; only proceed
    # when every variable produced a valid support/weight pair.
    if not np.any([isinstance(Z[i], int) for i in range(len(Z))]):
        if design == 'MC':
            PERT = []
            for i in range(dim):
                if mode[i] is not None:
                    # Mixture of uniforms spanning mode..support point
                    # (unimodal reconstruction).
                    U = []
                    for j in range(len(m[i]) + 1):
                        U.append(
                            ot.Uniform(float(min(mode[i], Z[i][j])),
                                       float(max(mode[i], Z[i][j]))))
                    PERT.append(ot.Mixture(U, Wgt[i]))
                else:
                    # Discrete reconstruction: mixture of Dirac masses.
                    U = []
                    for j in range(len(m[i]) + 1):
                        U.append(ot.Dirac(Z[i][j]))
                    PERT.append(ot.Mixture(U, Wgt[i]))
            DIST = ot.ComposedDistribution(PERT)
            Sample = DIST.getSample(N)
            return MinMax * sum(func(Sample) <= threshold) / N

        elif design == 'LHS':
            Sample = LHSdesign(Z, Wgt, mode, N)
            return MinMax * sum(func(Sample) <= threshold) / N
    else:
        # Invalid canonical moments: return a penalty probability of 1.
        return 1
# 1. The function G
def modeleChaboche(X):
    """Chaboche constitutive law: map [strain, R, C, gamma] to [stress]."""
    strain = X[0]
    hardening = X[2] * (1 - np.exp(-X[3] * strain))
    return [X[1] + hardening]


# Creation of the problem function
f = ot.PythonFunction(4, 1, modeleChaboche)

# 2. Random vector definition
Strain = ot.Uniform(0, 0.07)
# deterministic material parameters represented as Dirac distributions
unknownR = 750e6
unknownC = 2750e6
unknownGamma = 10
R = ot.Dirac(unknownR)
C = ot.Dirac(unknownC)
Gamma = ot.Dirac(unknownGamma)

# 3. View the PDF
Strain.setDescription(["Strain"])
R.setDescription(["R"])
C.setDescription(["C"])
Gamma.setDescription(["Gamma"])

# 4. Create the joint distribution function
inputRandomVector = ot.ComposedDistribution([Strain, R, C, Gamma])

# 5. Create the Monte-Carlo algorithm
sampleSize = 100
inputSample = inputRandomVector.getSample(sampleSize)
示例#10
0
print(
    "with automatic lower/user defined upper boundaries correction, pdf(left)=%.6g"
    % ks8.computePDF(left), ", pdf(right)=%.6g" % ks8.computePDF(right))

# boundary correction on both sides, with user-defined bounds
algo9 = ot.KernelSmoothing(ot.Normal(), False)
algo9.setBoundingOption(ot.KernelSmoothing.BOTH)
algo9.setLowerBound(-1.0)
algo9.setUpperBound(1.0)
ks9 = algo9.build(sample)
print(
    "with user defined boundaries correction, pdf(left)=%.6g" %
    ks9.computePDF(left), ", pdf(right)=%.6g" % ks9.computePDF(right))

# full degenerate case: every marginal is a Dirac (zero variance sample)
sample = ot.ComposedDistribution(
    [ot.Dirac(-7.0), ot.Dirac(0.0),
     ot.Dirac(8.0)]).getSample(50)
smoothed = ot.KernelSmoothing().build(sample)
print(smoothed.getSample(3))

# n-d degenerate case: mix of Dirac and continuous marginals
sample = ot.ComposedDistribution(
    [ot.Dirac(-7.0), ot.Arcsine(2.0, 3.0),
     ot.Dirac(8.0)]).getSample(50)
sample.setDescription(['d7', 'a23', 'd8'])
smoothed = ot.KernelSmoothing().build(sample)
print(smoothed.getSample(3))

# Test with reduced Cutoff - generates non positive phiGammaH
distribution = ot.Normal()
kernel = ot.Normal()
示例#11
0
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
# Template-generated example: only the branch whose name matches the class
# of ot.Dirac() is active (in practice, presumably the final else branch).
if (ot.Dirac().__class__.__name__ == 'ComposedDistribution'):
    correlation = ot.CorrelationMatrix(2)
    correlation[1, 0] = 0.25
    aCopula = ot.NormalCopula(correlation)
    marginals = [ot.Normal(1.0, 2.0), ot.Normal(2.0, 3.0)]
    distribution = ot.ComposedDistribution(marginals, aCopula)
elif (ot.Dirac().__class__.__name__ == 'CumulativeDistributionNetwork'):
    distribution = ot.CumulativeDistributionNetwork(
        [ot.Normal(2), ot.Dirichlet([0.5, 1.0, 1.5])],
        ot.BipartiteGraph([[0, 1], [0, 1]]))
else:
    distribution = ot.Dirac()
dimension = distribution.getDimension()
if dimension <= 2:
    if distribution.getDimension() == 1:
        # 1-d: draw PDF and CDF side by side
        distribution.setDescription(['$x$'])
        pdf_graph = distribution.drawPDF()
        cdf_graph = distribution.drawCDF()
        fig = plt.figure(figsize=(10, 4))
        plt.suptitle(str(distribution))
        pdf_axis = fig.add_subplot(121)
        cdf_axis = fig.add_subplot(122)
        View(pdf_graph, figure=fig, axes=[pdf_axis], add_legend=False)
        View(cdf_graph, figure=fig, axes=[cdf_axis], add_legend=False)
    else:
        # 2-d: draw the joint PDF only
        distribution.setDescription(['$x_1$', '$x_2$'])
        pdf_graph = distribution.drawPDF()
        fig = plt.figure(figsize=(10, 5))
示例#12
0
from __future__ import print_function
import openturns as ot

observationsSize = 5
# Create a collection of distribution
conditionedDistribution = ot.Normal()
conditioningDistributionCollection = []
# First conditioning distribution: continuous/continuous
atoms = [ot.Uniform(0.0, 1.0), ot.Uniform(1.0, 2.0)]
conditioningDistributionCollection.append(ot.ComposedDistribution(atoms))
# Second conditioning distribution: discrete/continuous (currently disabled)
atoms = [ot.Binomial(3, 0.5), ot.Uniform(1.0, 2.0)]
#conditioningDistributionCollection.append(ot.ComposedDistribution(atoms))
# Third conditioning distribution: dirac/continuous
atoms = [ot.Dirac(0.0), ot.Uniform(1.0, 2.0)]
conditioningDistributionCollection.append(ot.ComposedDistribution(atoms))


for conditioning in conditioningDistributionCollection:
    print("conditioning distribution=", conditioning)
    # observations drawn from the conditioned distribution parameterized by
    # the mean of the conditioning distribution
    observationsDistribution = ot.Distribution(conditionedDistribution)
    observationsDistribution.setParameter(conditioning.getMean())
    observations = observationsDistribution.getSample(observationsSize)
    distribution  = ot.PosteriorDistribution(ot.ConditionalDistribution(conditionedDistribution, conditioning), observations)
    dim = distribution.getDimension()
    print("Distribution ", distribution)
    print("Distribution ", distribution)
    print("range=", distribution.getRange())
    mean = distribution.getMean()
    print("Mean ", mean)
# prior distribution
mu0 = 25.

sigma0s = [0.1, 1.0]
# sigma0s.append(2.0)

# play with the variance of the prior:
# if the prior variance is low (information concerning the mu parameter is strong)
# then the posterior mean will be equal to the prior mean
# if large, then the posterior distribution is equivalent to the
# distribution of the sample mean
for i in range(len(sigma0s)):

    sigma0 = sigma0s[i]
    mean_prior = ot.Normal(mu0, sigma0)
    std_prior = ot.Dirac(2.0)  # standard dev is known
    prior = ot.ComposedDistribution([mean_prior, std_prior])

    # choose the initial state within the prior
    initialState = prior.getRealization()

    # conditional distribution
    conditional = ot.Normal()

    # create a metropolis-hastings sampler
    sampler = ot.RandomWalkMetropolisHastings(
        prior, conditional, data, initialState, proposalColl)
    sampler.setVerbose(True)
    sampler.setThinning(2)  # keep one state out of two
    sampler.setBurnIn(500)  # discard the first 500 states
    sampler.setCalibrationStrategyPerComponent(calibrationColl)
示例#14
0
stressObs.setDescription(['sigma'])
observations = persalys.Observations('obs1', model, strainObs, stressObs)

myStudy.add(observations)

# Least Squares linear
analysis = persalys.CalibrationAnalysis('myAnalysis', observations)
analysis.run()
myStudy.add(analysis)
print("analysis=", analysis)
print("result=", analysis.getResult())

# Least Squares Non linear
analysis2 = persalys.CalibrationAnalysis('myAnalysis2', observations)
analysis2.setMethodName('LeastSquaresNonlinear')
# calibrate R and C (prior: Dirac at (700e6, 2400e6)); gam is fixed at 7
analysis2.setCalibratedInputs(['R', 'C'], ot.Dirac([700e6, 2400e6]), ['gam'],
                              [7.])
analysis2.run()
myStudy.add(analysis2)
print("analysis=", analysis2)

# Gaussian linear calibration
analysis3 = persalys.CalibrationAnalysis('myAnalysis3', observations)
sigmaStress = 1.e7  # (Pa)
errorCovariance = ot.CovarianceMatrix(1)
errorCovariance[0, 0] = sigmaStress**2
analysis3.setMethodName('GaussianLinear')
analysis3.setErrorCovariance(errorCovariance)
analysis3.run()
myStudy.add(analysis3)
print("analysis=", analysis3)
示例#15
0
print("isoprobabilistic transformation (general normal)=",
      distribution.getIsoProbabilisticTransformation())
# General case with non-normal standard distribution
collection[0] = ot.SklarCopula(ot.Student(
    3.0, [1.0]*2, [3.0]*2, ot.CorrelationMatrix(2)))
collection.append(ot.Triangular(2.0, 3.0, 4.0))
distribution = ot.BlockIndependentDistribution(collection)
print("isoprobabilistic transformation (general non-normal)=",
      distribution.getIsoProbabilisticTransformation())
dim = distribution.getDimension()
# conditional quantities of the last component at x, given y
x = 0.6
y = [0.2] * (dim - 1)
print("conditional PDF=%.5f" % distribution.computeConditionalPDF(x, y))
print("conditional CDF=%.5f" % distribution.computeConditionalCDF(x, y))
print("conditional quantile=%.5f" %
      distribution.computeConditionalQuantile(x, y))
pt = ot.Point(dim)
for i in range(dim):
    pt[i] = 0.1 * i + 0.05
print("sequential conditional PDF=",
      distribution.computeSequentialConditionalPDF(pt))
resCDF = distribution.computeSequentialConditionalCDF(pt)
print("sequential conditional CDF(", pt, ")=", resCDF)
print("sequential conditional quantile(", resCDF, ")=",
      distribution.computeSequentialConditionalQuantile(resCDF))
print('range=', distribution.getRange())

# getStandardDeviation vs Dirac: the Dirac marginal contributes zero spread
distribution2 = ot.BlockIndependentDistribution([ot.Normal(), ot.Dirac(1800)])
ott.assert_almost_equal(distribution2.getStandardDeviation(), [1, 0])
示例#16
0
# prior distribution
mu0 = 25.

sigma0s = [0.1, 1.0]
# sigma0s.append(2.0)

# play with the variance of the prior:
# if the prior variance is low (information concerning the mu parameter is strong)
# then the posterior mean will be equal to the prior mean
# if large, then the posterior distribution is equivalent to the
# distribution of the sample mean
for i in range(len(sigma0s)):

    sigma0 = sigma0s[i]
    mean_prior = ot.Normal(mu0, sigma0)
    std_prior = ot.Dirac(2.0)  # standard dev is known
    prior = ot.ComposedDistribution([mean_prior, std_prior])
    # choose the initial state within the prior
    initialState = prior.getRealization()

    # conditional distribution
    conditional = ot.Normal()

    # create a Gibbs sampler: one Metropolis-Hastings sampler per component
    mean_sampler = ot.RandomWalkMetropolisHastings(prior, initialState,
                                                   mean_instrumental, [0])
    mean_sampler.setLikelihood(conditional, data)
    std_sampler = ot.RandomWalkMetropolisHastings(prior, initialState,
                                                  std_instrumental, [1])
    std_sampler.setLikelihood(conditional, data)
    sampler = ot.Gibbs([mean_sampler, std_sampler])
示例#17
0
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
# Template-generated example: only the branch whose name matches the class
# of ot.Dirac() is active (in practice, presumably the final else branch).
if ot.Dirac().__class__.__name__ == 'ComposedDistribution':
    correlation = ot.CorrelationMatrix(2)
    correlation[1, 0] = 0.25
    aCopula = ot.NormalCopula(correlation)
    marginals = [ot.Normal(1.0, 2.0), ot.Normal(2.0, 3.0)]
    distribution = ot.ComposedDistribution(marginals, aCopula)
elif ot.Dirac().__class__.__name__ == 'CumulativeDistributionNetwork':
    distribution = ot.CumulativeDistributionNetwork(
        [ot.Normal(2), ot.Dirichlet([0.5, 1.0, 1.5])],
        ot.BipartiteGraph([[0, 1], [0, 1]]))
elif ot.Dirac().__class__.__name__ == 'Histogram':
    distribution = ot.Histogram([-1.0, 0.5, 1.0, 2.0], [0.45, 0.4, 0.15])
else:
    distribution = ot.Dirac()
dimension = distribution.getDimension()
if dimension == 1:
    # 1-d: draw PDF and CDF side by side
    distribution.setDescription(['$x$'])
    pdf_graph = distribution.drawPDF()
    cdf_graph = distribution.drawCDF()
    fig = plt.figure(figsize=(10, 4))
    plt.suptitle(str(distribution))
    pdf_axis = fig.add_subplot(121)
    cdf_axis = fig.add_subplot(122)
    View(pdf_graph, figure=fig, axes=[pdf_axis], add_legend=False)
    View(cdf_graph, figure=fig, axes=[cdf_axis], add_legend=False)
elif dimension == 2:
    # 2-d: draw the joint PDF only
    distribution.setDescription(['$x_1$', '$x_2$'])
    pdf_graph = distribution.drawPDF()
def CostSobol(MyModel, p, m, lower, upper, distribution, indexNumber,
              indexChoice, NSobol, MINMAX):
    '''
    Return the Sobol index associated with the discrete measure recovered
    from the canonical moment sequences.

    Parameters
    ----------
    MyModel : callable
        Model evaluated on the Sobol design.
    p : sequence of float
        Concatenated canonical-moment parameters; block i has
        len(m[i]) + 1 entries.
    m : list of sequences
        Moment constraints, one entry per input variable.
    lower, upper : sequences of float
        Bounds of each input variable (their length defines the dimension).
    distribution : list
        Marginal distributions; entries are replaced IN PLACE by the
        recovered Dirac mixtures.
    indexNumber : int
        Index of the variable whose Sobol index is returned.
    indexChoice : int
        1: rebuild only marginal indexNumber and return its first-order
        index; 0: rebuild every other marginal and return the total-order
        index of indexNumber (any other value returns None).
    NSobol : int
        Size of the Sobol experiment.
    MINMAX : int
        +1 / -1 factor turning the index into a min or max objective.
    '''
    dim = len(lower)
    # Split the flat parameter vector p into one block per input variable.
    if len(m) == dim:
        pp = []
        t = 0
        for i in range(dim):
            pp.append(p[t:t + len(m[i]) + 1])
            t = t + len(m[i]) + 1
    else:
        # NOTE(review): execution continues after this print and will fail
        # later on the undefined 'pp'.
        print('error size of moment vector')

    if indexChoice == 1:
        # Recover the discrete measure of the variable of interest only.
        P = list(
            QD_Algorithm(
                Affine_Transformation(lower[indexNumber], upper[indexNumber],
                                      m[indexNumber]))) + list(pp[indexNumber])
        Position, Weight = Canonical_to_Position([lower[indexNumber]],
                                                 [upper[indexNumber]], P)

        distribution[indexNumber] = ot.Mixture(
            [ot.Dirac(Position[i]) for i in range(len(Position))], Weight)
        composedDistribution = ot.ComposedDistribution(distribution)
        # fixed seed so the estimated index is deterministic across calls
        ot.RandomGenerator.SetSeed(0)
        inputDesign = ot.SobolIndicesExperiment(composedDistribution, NSobol,
                                                True).generate()
        outputDesign = MyModel(inputDesign)

        sensitivityAnalysis = ot.SaltelliSensitivityAlgorithm(
            inputDesign, outputDesign, NSobol)
        firstOrder = sensitivityAnalysis.getFirstOrderIndices()
        return MINMAX * firstOrder[indexNumber]

    elif indexChoice == 0:
        # Recover the discrete measure of every variable except indexNumber;
        # t indexes the (dim - 1) rebuilt marginals.
        t = 0
        P = [[]] * (dim - 1)
        Position = [[]] * (dim - 1)
        Weight = [[]] * (dim - 1)
        for i in range(dim):
            if i != indexNumber:
                P[t] = list(
                    QD_Algorithm(
                        Affine_Transformation(lower[i], upper[i],
                                              m[i]))) + list(pp[i])
                Position[t], Weight[t] = Canonical_to_Position([lower[i]],
                                                               [upper[i]],
                                                               P[t])
                distribution[i] = ot.Mixture([
                    ot.Dirac(Position[t][j]) for j in range(len(Position[t]))
                ], Weight[t])
                t += 1
        composedDistribution = ot.ComposedDistribution(distribution)
        ot.RandomGenerator.SetSeed(0)
        inputDesign = ot.SobolIndicesExperiment(composedDistribution, NSobol,
                                                True).generate()
        outputDesign = MyModel(inputDesign)

        sensitivityAnalysis = ot.SaltelliSensitivityAlgorithm(
            inputDesign, outputDesign, NSobol)
        totalOrder = sensitivityAnalysis.getTotalOrderIndices()
        return MINMAX * totalOrder[indexNumber]
# .. math::
#     \begin{aligned}
#         \forall t_k \in \mathcal{D}, \forall i,j < p , \> m_{i+1,j+1} = \mathbb{P} (X_{t_{k+1}} = j \> | \> X_{t_{k}} = i)
#     \end{aligned}
#
# The library proposes to model it through the object *DiscreteMarkovChain* defined thanks to the origin :math:`X_{t_0}` (which can be either deterministic or uncertain), the transition matrix :math:`\mathcal{M}` and the time grid.

# %%
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt
ot.Log.Show(ot.Log.NONE)

# %%
# Define the origin: the chain deterministically starts in state 0
origin = ot.Dirac(0.0)

# %%
# Define the transition matrix (entry (i, j): probability of moving from
# state i to state j, per the definition above)
transition = ot.SquareMatrix([[0.1, 0.3, 0.6], [0.7, 0.1, 0.2],
                              [0.5, 0.3, 0.2]])

# %%
# Define an 1-d mesh: 50 time steps of length 1 starting at t = 0
tgrid = ot.RegularGrid(0.0, 1.0, 50)

# %%
# Markov chain definition and realization
process = ot.DiscreteMarkovChain(origin, transition, tgrid)
real = process.getRealization()
graph = real.drawMarginal(0)
示例#20
0
    z = z0 + vinf * t + tau * (v0 - vinf) * (1 - np.exp(-t / tau))
    z = np.maximum(z, zmin)
    altitude = [[zeta] for zeta in z]
    return altitude


inputDim = 5
outputDim = 1
# wrap the trajectory function as a point-to-field function over the mesh
alti = ot.PythonPointToFieldFunction(inputDim, mesh, outputDim, AltiFunc)

# Creation of the input distribution
distZ0 = ot.Uniform(100.0, 150.0)
distV0 = ot.Normal(55.0, 10.0)
distM = ot.Normal(80.0, 8.0)
distC = ot.Uniform(0.0, 30.0)
distZmin = ot.Dirac([0.0])  # deterministic lower altitude bound
distX = ot.ComposedDistribution([distZ0, distV0, distM, distC, distZmin])

# Sample the model
samplesize = 1000
inputSample = distX.getSample(samplesize)
outputSample = alti(inputSample)

# Draw some curves
graph = outputSample.drawMarginal(0)
graph.setTitle('chute visqueuse')
graph.setXTitle(r'$t$')
graph.setYTitle(r'$z$')
# one colour per trajectory, hues spread over the HSV circle
graph.setColors([
    ot.Drawable.ConvertFromHSV(i * (360.0 / samplesize), 1.0, 1.0)
    for i in range(len(graph.getDrawables()))
示例#21
0
# flooding model wrapped as an OpenTURNS function; memoized to reuse
# evaluations
g = ot.PythonFunction(4, 1, functionFlooding) 
g = ot.MemoizeFunction(g)
g.setOutputDescription(["H (m)"])

# %%
# We load the input distribution for :math:`Q` :

# %%
Q = fm.Q
print(Q)

# %%
# Set the parameters to be calibrated.

# %%
# nominal parameter values, represented as Dirac distributions
K_s = ot.Dirac(30.0)
Z_v = ot.Dirac(50.0)
Z_m = ot.Dirac(55.0)
K_s.setDescription(["Ks (m^(1/3)/s)"])
Z_v.setDescription(["Zv (m)"])
Z_m.setDescription(["Zm (m)"])

# %%
# Create the joint input distribution.

# %%
inputRandomVector = ot.ComposedDistribution([Q, K_s, Z_v, Z_m])

# %%
# Create a Monte-Carlo sample of the output H.
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
# Template-generated dispatch on the class name of ot.Dirac(); only the
# matching branch is active (the chain continues past this excerpt).
if ot.Dirac().__class__.__name__ == 'Bernoulli':
    distribution = ot.Bernoulli(0.7)
elif ot.Dirac().__class__.__name__ == 'Binomial':
    distribution = ot.Binomial(5, 0.2)
elif ot.Dirac().__class__.__name__ == 'ComposedDistribution':
    copula = ot.IndependentCopula(2)
    marginals = [ot.Uniform(1.0, 2.0), ot.Normal(2.0, 3.0)]
    distribution = ot.ComposedDistribution(marginals, copula)
elif ot.Dirac().__class__.__name__ == 'CumulativeDistributionNetwork':
    coll = [ot.Normal(2), ot.Dirichlet([0.5, 1.0, 1.5])]
    distribution = ot.CumulativeDistributionNetwork(
        coll, ot.BipartiteGraph([[0, 1], [0, 1]]))
elif ot.Dirac().__class__.__name__ == 'Histogram':
    distribution = ot.Histogram([-1.0, 0.5, 1.0, 2.0], [0.45, 0.4, 0.15])
elif ot.Dirac().__class__.__name__ == 'KernelMixture':
    kernel = ot.Uniform()
    sample = ot.Normal().getSample(5)
    bandwith = [1.0]
    distribution = ot.KernelMixture(kernel, bandwith, sample)
elif ot.Dirac().__class__.__name__ == 'MaximumDistribution':
    coll = [
        ot.Uniform(2.5, 3.5),
        ot.LogUniform(1.0, 1.2),
        ot.Triangular(2.0, 3.0, 4.0)
    ]
    distribution = ot.MaximumDistribution(coll)
elif ot.Dirac().__class__.__name__ == 'Multinomial':
    distribution = ot.Multinomial(5, [0.2])