Example #1
#! /usr/bin/env python

import openturns as ot

observationsSize = 5
# Create the conditioned distribution and a collection of conditioning distributions
conditionedDistribution = ot.Normal()
conditioningDistributionCollection = []
# First conditioning distribution: continuous/continuous
atoms = [ot.Uniform(0.0, 1.0), ot.Uniform(1.0, 2.0)]
conditioningDistributionCollection.append(ot.ComposedDistribution(atoms))
# Second conditioning distribution: discrete/continuous
atoms = [ot.Binomial(3, 0.5), ot.Uniform(1.0, 2.0)]
# conditioningDistributionCollection.append(ot.ComposedDistribution(atoms))
# Third conditioning distribution: dirac/continuous
atoms = [ot.Dirac(0.0), ot.Uniform(1.0, 2.0)]
conditioningDistributionCollection.append(ot.ComposedDistribution(atoms))

for conditioning in conditioningDistributionCollection:
    print("conditioning distribution=", conditioning)
    observationsDistribution = ot.Distribution(conditionedDistribution)
    observationsDistribution.setParameter(conditioning.getMean())
    observations = observationsDistribution.getSample(observationsSize)
    distribution = ot.PosteriorDistribution(
        ot.ConditionalDistribution(conditionedDistribution, conditioning),
        observations)
    dim = distribution.getDimension()
    print("Distribution ", distribution)
    print("Distribution ", distribution)
    print("range=", distribution.getRange())
    mean = distribution.getMean()
Example #2
    def run(self):
        """
        Build the POD models.

        Notes
        -----
        This method builds the kriging model. First, the censored data
        are filtered if needed, and the Box Cox transformation is applied
        if enabled. The POD models are then built: conditional samples are
        simulated for each defect size, then the distributions of the
        probability estimator (for MC simulation) are built. Finally, a
        sample of this distribution is used to compute the mean POD and
        the POD at the given confidence level.
        """

        # run the kriging algorithm and get the result if not given
        if not self._userKriging:
            if self._verbose:
                print('Start optimizing covariance model parameters...')
            # build the kriging algorithm without optimizer
            algoKriging = self._buildKrigingAlgo(self._input, self._signals)
            # optimize the covariance model parameters and return the kriging
            # algorithm with the run launched
            if LooseVersion(ot.__version__) == '1.9':
                llDim = algoKriging.getReducedLogLikelihoodFunction(
                ).getInputDimension()
            else:
                llDim = algoKriging.getLogLikelihoodFunction(
                ).getInputDimension()
            lowerBound = [0.001] * llDim
            upperBound = [50] * llDim
            algoKriging = self._estimKrigingTheta(algoKriging, lowerBound,
                                                  upperBound,
                                                  self._initialStartSize)
            algoKriging.run()
            if self._verbose:
                print('Kriging optimizer completed')
            self._krigingResult = algoKriging.getResult()

        # compute the Q2
        self._Q2 = self._computeQ2(self._input, self._signals,
                                   self._krigingResult)
        if self._verbose:
            print('kriging validation Q2 (>0.9): {:0.4f}'.format(self._Q2))

        # set default uniform distribution with min and max of the given defect sizes
        if self._distribution is None:
            inputMin = self._input.getMin()
            inputMin[0] = np.min(self._defectSizes)
            inputMax = self._input.getMax()
            inputMax[0] = np.max(self._defectSizes)
            marginals = [
                ot.Uniform(inputMin[i], inputMax[i]) for i in range(self._dim)
            ]
            self._distribution = ot.ComposedDistribution(marginals)

        # compute the sample containing the POD values for all defect
        self._PODPerDefect = ot.NumericalSample(
            self._simulationSize * self._samplingSize, self._defectNumber)
        for i, defect in enumerate(self._defectSizes):
            self._PODPerDefect[:, i] = self._computePODSamplePerDefect(
                defect, self._detectionBoxCox, self._krigingResult,
                self._distribution, self._simulationSize, self._samplingSize)
            if self._verbose:
                updateProgress(i, self._defectNumber,
                               'Computing POD per defect')

        # compute the mean POD
        meanPOD = self._PODPerDefect.computeMean()
        # create the interpolate function of the POD model
        interpModel = interp1d(self._defectSizes,
                               np.array(meanPOD),
                               kind='linear')
        self._PODmodel = ot.PythonFunction(1, 1, interpModel)
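
A minimal usage sketch for the method above (hedged: `pod` stands for an instance of the enclosing POD class, which is not shown in this snippet, and `getPODModel` is an assumed accessor name):

pod.run()
# evaluate the mean POD model built by run(); it is a 1-d ot.PythonFunction
# (the accessor name is an assumption for illustration)
podModel = pod.getPODModel()
print(podModel([0.3]))  # mean POD at a defect size of 0.3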
Example #3
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

# Defining parameters
dimension = 3
bounds = ot.Interval(dimension)
size = 25

# Build standard LHS algorithm
distribution = ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension)
lhs = ot.LHSExperiment(distribution, size)
lhs.setRandomShift(False)  # centered
lhs.setAlwaysShuffle(True)  # randomized

# print the object
print("lhs=", lhs)
print("Bounds of uniform distributions=", distribution.getRange())

# Generate design without optimization
design = lhs.generate()
print("design=", design)

# Defining space fillings
spaceFillingC2 = ot.SpaceFillingC2()
spaceFillingPhiP = ot.SpaceFillingPhiP(10)

# print the criteria on this design
print("PhiP=%f, C2=%f" % (ot.SpaceFillingPhiP().evaluate(design),
                          ot.SpaceFillingC2().evaluate(design)))
Example #4
from __future__ import print_function
import openturns as ot
from operator import itemgetter 
import openturns.viewer as viewer
from matplotlib import pylab as plt
ot.Log.Show(ot.Log.NONE)

# %%
# borehole model
dimension = 8
input_names = ['rw', 'r', 'Tu', 'Hu', 'Tl', 'Hl', 'L', 'Kw']
model = ot.SymbolicFunction(input_names,
                            ['(2*pi_*Tu*(Hu-Hl))/(ln(r/rw)*(1+(2*L*Tu)/(ln(r/rw)*rw^2*Kw)+Tu/Tl))'])
coll = [ot.Normal(0.1, 0.0161812),
         ot.LogNormal(7.71, 1.0056),
         ot.Uniform(63070.0, 115600.0),
         ot.Uniform(990.0, 1110.0),
         ot.Uniform(63.1, 116.0),
         ot.Uniform(700.0, 820.0),
         ot.Uniform(1120.0, 1680.0),
         ot.Uniform(9855.0, 12045.0)]
distribution = ot.ComposedDistribution(coll)
distribution.setDescription(input_names)

# %%
# Freeze r, Tu, Tl from model to go faster
selection = [1,2,4]
complement = ot.Indices(selection).complement(dimension)
distribution = distribution.getMarginal(complement)
model = ot.ParametricFunction(model, selection, distribution.getMarginal(selection).getMean())
input_names_copy = list(input_names)
Example #5
from __future__ import print_function
import openturns as ot

ot.TESTPREAMBLE()

input_dimension = 3
output_dimension = 1

formula = [
    'sin(pi_*X1)+7*sin(pi_*X2)*sin(pi_*X2)+0.1*((pi_*X3)*(pi_*X3)*(pi_*X3)*(pi_*X3))*sin(pi_*X1)'
]

model = ot.SymbolicFunction(['X1', 'X2', 'X3'], formula)

distribution = ot.ComposedDistribution([ot.Uniform(-1.0, 1.0)] *
                                       input_dimension)

# Size of simulation
size = 10000

# Test with the various implementation methods
methods = ["Saltelli", "Jansen", "MauntzKucherenko", "Martinez"]

# Generate input/output designs
computeSO = True
inputDesign = ot.SobolIndicesExperiment(distribution, size,
                                        computeSO).generate()
outputDesign = model(inputDesign)
# Case 1: estimation of the sensitivity indices with each estimator and no
# bootstrap. The loop body is reconstructed (the original lines were fused
# with the next example): each named estimator class is instantiated in turn.
for method in methods:
    algo = getattr(ot, method + 'SensitivityAlgorithm')(
        inputDesign, outputDesign, size)
    print(method, algo.getFirstOrderIndices(), algo.getTotalOrderIndices())
Example #6
import openturns as ot


def computeKSStatistics(sample, distribution):
    """Compute the Kolmogorov-Smirnov distance between a sample and a distribution."""
    # the signature and the sort are reconstructed from the call
    # `computeKSStatistics(sample, dist)` below
    sample = sample.sort()
    n = sample.getSize()
    D = 0.0
    index = -1
    D_previous = 0.0
    for i in range(n):
        F = distribution.computeCDF(sample[i])
        Fminus = F - float(i) / n
        Fplus = float(i + 1) / n - F
        D = max(Fminus, Fplus, D)
        if D > D_previous:
            index = i
            D_previous = D
    return D


# %%
dist = ot.Uniform(0, 1)
dist

# %%
computeKSStatistics(sample, dist)

# %%
# The following function generates a sample of K.S. distances when the tested distribution is the `Uniform(0,1)` distribution.


# %%
def generateKSSampleKnownParameters(nrepeat, samplesize):
    """
    Generate a sample of K.S. distances under the Uniform(0,1) hypothesis.

    nrepeat : number of repetitions, i.e. the size of the returned sample
    samplesize : the size of each sample to generate from the Uniform distribution
    """
    # body reconstructed from the description above
    dist = ot.Uniform(0.0, 1.0)
    D = ot.Sample(nrepeat, 1)
    for i in range(nrepeat):
        sample = dist.getSample(samplesize)
        D[i] = [computeKSStatistics(sample, dist)]
    return D
Example #7
import openturns as ot


# helper reconstructed: the original snippet starts inside this function,
# so the signature and the first line are assumptions
def clean(polynomial):
    """Zero out the negligible coefficients of a polynomial."""
    coefficients = polynomial.getCoefficients()
    for i in range(coefficients.getDimension()):
        if abs(coefficients[i]) < 1.0e-8:
            coefficients[i] = 0.0
    return ot.UniVariatePolynomial(coefficients)


iMax = 5
distributionCollection = [
    ot.Laplace(0.0, 1.0),
    ot.Logistic(0.0, 1.0),
    ot.Normal(0.0, 1.0),
    ot.Normal(1.0, 1.0),
    ot.Rayleigh(1.0),
    ot.Student(22.0),
    ot.Triangular(-1.0, 0.3, 1.0),
    ot.Uniform(-1.0, 1.0),
    ot.Uniform(-1.0, 3.0),
    ot.WeibullMin(1.0, 3.0),
    ot.Beta(1.0, 2.0, -1.0, 1.0),
    ot.Beta(0.5, 0.5, -1.0, 1.0),
    ot.Beta(0.5, 0.5, -2.0, 3.0),
    ot.Gamma(1.0, 3.0),
    ot.Arcsine()
]
for n in range(len(distributionCollection)):
    distribution = distributionCollection[n]
    name = distribution.getClassName()
    polynomialFactory = ot.StandardDistributionPolynomialFactory(
        ot.AdaptiveStieltjesAlgorithm(distribution))
    print("polynomialFactory(", name, "=", polynomialFactory, ")")
    for i in range(iMax):
        # assumed continuation: print the first iMax orthonormal polynomials,
        # with negligible coefficients cleaned to zero
        print(clean(polynomialFactory.build(i)))
Example #8
#!/usr/bin/env python
# coding:utf-8
"""Sample distribution.

Generate a sampling design using OpenTURNS.

"""
import openturns as ot
import numpy as np
import json

n_samples = 100
dists = [ot.Uniform(20., 40.), ot.Normal(2345., 400.)]

settings_path = './'

with open(settings_path + 'settings.json', 'r') as f:
    settings = json.load(f)

distribution = ot.ComposedDistribution(dists, ot.IndependentCopula(len(dists)))
experiment = ot.LHSExperiment(distribution, n_samples, True, True)
sample = np.array(experiment.generate()).tolist()

settings['space']['sampling'] = sample

with open(settings_path + 'settings.json', 'w') as f:
    json.dump(settings, f, indent=4)
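
The snippet assumes that `settings.json` already exists and contains a `space` entry; a minimal skeleton (a hypothetical bootstrap, not part of the original) could be written as:

import json

# create a bare settings file with an empty 'space' section
with open('settings.json', 'w') as f:
    json.dump({'space': {}}, f, indent=4)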
Example #9
symbolicModel = persalys.SymbolicPhysicalModel(
    'symbolicModel', [x1, x2, x3], [fake_var, y0, fake_y0, y1],
    [formula_fake_var, formula_y0, formula_y0, formula_y1])

myStudy.add(symbolicModel)

# python model ##
code = 'from math import cos, sin, sqrt\n\ndef _exec(x1, x2, x3):\n    y0 = cos(0.5*x1) + sin(x2) + sqrt(x3)\n    return y0\n'
pythonModel = persalys.PythonPhysicalModel('pythonModel', [x1, x2, x3], [y0],
                                           code)
myStudy.add(pythonModel)

filename = 'data.csv'
cDist = ot.ComposedDistribution(
    [ot.Normal(), ot.Gumbel(),
     ot.Normal(), ot.Uniform()],
    ot.ComposedCopula([ot.IndependentCopula(2),
                       ot.GumbelCopula()]))
sample = cDist.getSample(200)
sample.exportToCSVFile(filename, ' ')

# Designs of Experiment ##

# fixed design ##
ot.RandomGenerator.SetSeed(0)
fixedDesign = persalys.FixedDesignOfExperiment('fixedDesign', symbolicModel)
inputSample = ot.LHSExperiment(
    ot.ComposedDistribution([ot.Uniform(0., 10.),
                             ot.Uniform(0., 10.)]), 10).generate()
inputSample.stack(ot.Sample(10, [0.5]))
fixedDesign.setOriginalInputSample(inputSample)
Example #10
# %%
# Build a metamodel over each segment
degree = 5
samplingSize = 100
enumerateFunction = ot.LinearEnumerateFunction(dimension)
productBasis = ot.OrthogonalProductPolynomialFactory(
    [ot.LegendreFactory()] * dimension, enumerateFunction)
adaptiveStrategy = ot.FixedStrategy(
    productBasis, enumerateFunction.getStrataCumulatedCardinal(degree))
projectionStrategy = ot.LeastSquaresStrategy(
    ot.MonteCarloExperiment(samplingSize))

# %%
# Segment 1: (-1.0; 0.0)
d1 = ot.Uniform(-1.0, 0.0)
fc1 = ot.FunctionalChaosAlgorithm(f, d1, adaptiveStrategy, projectionStrategy)
fc1.run()
mm1 = fc1.getResult().getMetaModel()
graph = mm1.draw(-1.0, -1e-6)
view = viewer.View(graph)

# %%
# Segment 2: (0.0, 1.0)
d2 = ot.Uniform(0.0, 1.0)
fc2 = ot.FunctionalChaosAlgorithm(f, d2, adaptiveStrategy, projectionStrategy)
fc2.run()
mm2 = fc2.getResult().getMetaModel()
graph = mm2.draw(1e-6, 1.0)
view = viewer.View(graph)
Example #11
#! /usr/bin/env python

import openturns as ot
import openturns.testing as ott
import math as m

ot.TESTPREAMBLE()
ot.RandomGenerator.SetSeed(0)

# Definition of the marginals
X1 = ot.Uniform(-m.pi, m.pi)
X2 = ot.Uniform(-m.pi, m.pi)
X3 = ot.Uniform(-m.pi, m.pi)

# 3d distribution made with independent marginals
distX = ot.ComposedDistribution([X1, X2, X3])

# Get a sample of it
size = 100
X = distX.getSample(size)

# The Ishigami model
modelIshigami = ot.SymbolicFunction(
    ["X1", "X2", "X3"], ["sin(X1) + 5.0 * (sin(X2))^2 + 0.1 * X3^4 * sin(X1)"])

# Apply model: Y = m(X)
Y = modelIshigami(X)

# We define the covariance models for the HSIC indices.
# For the input, we consider a SquaredExponential covariance model.
covarianceModelCollection = ot.CovarianceModelCollection()
Example #12
import openturns as ot
import otrobopt

# ot.Log.Show(ot.Log.INFO)

calJ = ot.SymbolicFunction(['x0', 'x1', 'theta'],
                           ['(x0-2)^2 + 2*x1^2 - 4*x1 + theta'])
calG = ot.SymbolicFunction(['x0', 'x1', 'theta'],
                           ['-(-x0 + 4*x1 + theta - 3)'])
J = ot.ParametricFunction(calJ, [2], [2.0])
g = ot.ParametricFunction(calG, [2], [2.0])

dim = J.getInputDimension()

solver = ot.Cobyla()
solver.setMaximumIterationNumber(1000)

thetaDist = ot.Uniform(1.0, 3.0)
robustnessMeasure = otrobopt.MeanMeasure(J, thetaDist)
reliabilityMeasure = otrobopt.JointChanceMeasure(g, thetaDist, ot.Greater(),
                                                 0.9)
problem = otrobopt.RobustOptimizationProblem(robustnessMeasure,
                                             reliabilityMeasure)
bounds = ot.Interval([-10.0] * dim, [10.0] * dim)
problem.setBounds(bounds)

algo = otrobopt.SequentialMonteCarloRobustAlgorithm(problem, solver)
algo.setMaximumIterationNumber(10)
algo.setMaximumAbsoluteError(1e-3)
algo.setInitialSamplingSize(10)
algo.setInitialSearch(100)
algo.run()
result = algo.getResult()
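
A hedged follow-up (the accessors below come from the generic ot.OptimizationResult API; the original snippet stops at getResult()):

# inspect the robust optimum
print('optimal point:', result.getOptimalPoint())
print('optimal value:', result.getOptimalValue())
print('iterations:', result.getIterationNumber())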
Example #13
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

ot.TESTPREAMBLE()
ot.RandomGenerator.SetSeed(0)


def checkMarginals(coll):
    osmc = ot.OrderStatisticsMarginalChecker(coll)
    print("marginals=", coll)
    print("isCompatible=", osmc.isCompatible())
    print("partition=", osmc.buildPartition())


coll = [
    ot.Uniform(-1.0, 1.0),
    ot.LogUniform(1.0, 1.2),
    ot.Triangular(3.0, 4.0, 5.),
    ot.Uniform(5.0, 6.0),
    ot.Uniform(5.5, 6.5)
]
checkMarginals(coll)
coll.append(ot.Uniform(0.0, 1.0))
checkMarginals(coll)
Example #14
    myStudy.add('randomGeneratorState', randomGeneratorState)

    # Create a GeneralLinearModelResult
    generalizedLinearModelResult = ot.GeneralLinearModelResult()
    generalizedLinearModelResult.setName('generalizedLinearModelResult')
    myStudy.add('generalizedLinearModelResult', generalizedLinearModelResult)

    # KDTree
    sample = ot.Normal(3).getSample(10)
    kDTree = ot.KDTree(sample)
    myStudy.add('kDTree', kDTree)

    # TensorApproximationAlgorithm/Result
    dim = 1
    model = ot.SymbolicFunction(['x'], ['x*sin(x)'])
    distribution = ot.ComposedDistribution([ot.Uniform()] * dim)
    factoryCollection = [ot.FourierSeriesFactory()] * dim
    functionFactory = ot.OrthogonalProductFunctionFactory(factoryCollection)
    size = 10
    X = distribution.getSample(size)
    Y = model(X)
    nk = [5] * dim
    rank = 1
    algo = ot.TensorApproximationAlgorithm(X, Y, distribution, functionFactory,
                                           nk, rank)
    algo.run()
    tensorResult = algo.getResult()
    myStudy.add('tensorResult', tensorResult)
    tensorIn = [0.4]
    tensorRef = tensorResult.getMetaModel()(tensorIn)
Example #15
# We see that the Rastrigin function has several local minima. However, there is only one single global minimum at :math:`\vect{x}^\star=(0, 0)`.

# %%
# Create the problem and set the optimization algorithm
# -----------------------------------------------------

# %%
problem = ot.OptimizationProblem(rastrigin)

# %%
# We use the :class:`~openturns.Cobyla` algorithm and run it from multiple starting points selected by a :class:`~openturns.LowDiscrepancyExperiment`.

# %%
size = 64
distribution = ot.ComposedDistribution(
    [ot.Uniform(lowerbound[0], upperbound[0])] * dim)
experiment = ot.LowDiscrepancyExperiment(ot.SobolSequence(), distribution,
                                         size)
solver = ot.MultiStart(ot.Cobyla(problem), experiment.generate())

# %%
# Visualize the starting points of the optimization algorithm
# -----------------------------------------------------------

# %%
startingPoints = solver.getStartingSample()
graph = rastrigin.draw(lowerbound, upperbound, [100] * dim)
graph.setTitle("Rastrigin function")
cloud = ot.Cloud(startingPoints)
cloud.setPointStyle("bullet")
cloud.setColor("black")
Example #16
import openturns as ot
import otrobopt
from matplotlib import pyplot as plt
from openturns.viewer import View

thetaDist = ot.Normal(2.0, 0.1)
if 'IndividualChanceMeasure' == 'WorstCaseMeasure':
    thetaDist = ot.Uniform(-1.0, 4.0)
elif 'ChanceMeasure' in 'IndividualChanceMeasure':
    thetaDist = ot.Normal(1.0, 1.0)

f_base = ot.SymbolicFunction(['x', 'theta'], ['x*theta'])
f = ot.ParametricFunction(f_base, [1], thetaDist.getMean())

if 'IndividualChanceMeasure' == 'JointChanceMeasure':
    measure = otrobopt.JointChanceMeasure(f, thetaDist, ot.GreaterOrEqual(),
                                          0.95)
elif 'IndividualChanceMeasure' == 'IndividualChanceMeasure':
    measure = otrobopt.IndividualChanceMeasure(f, thetaDist,
                                               ot.GreaterOrEqual(), [0.95])
elif 'IndividualChanceMeasure' == 'MeanStandardDeviationTradeoffMeasure':
    measure = otrobopt.MeanStandardDeviationTradeoffMeasure(
        f, thetaDist, [0.8])
elif 'IndividualChanceMeasure' == 'QuantileMeasure':
    measure = otrobopt.QuantileMeasure(f, thetaDist, 0.99)
else:
    measure = otrobopt.IndividualChanceMeasure(f, thetaDist)

N = 10
experiment = ot.LHSExperiment(N)
factory = otrobopt.MeasureFactory(experiment)
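
A hedged continuation (the original snippet stops at the factory; `build` is the otrobopt.MeasureFactory method that discretizes a measure over the experiment's sample of theta values):

# discretize the continuous measure over the LHS sample of theta
discretizedMeasure = factory.build(measure)
print(discretizedMeasure)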
Example #17
#!/usr/bin/env python

import openturns as ot
import persalys

Study_0 = persalys.Study('Study_0')
persalys.Study.Add(Study_0)

# variables
dist_z0 = ot.Uniform(100, 150)
z0 = persalys.Input('z0', 100, dist_z0, '')
dist_v0 = ot.Normal(55, 10)
v0 = persalys.Input('v0', 55, dist_v0, '')
dist_m = ot.Normal(80, 8)
m = persalys.Input('m', 80, dist_m, '')
dist_c = ot.Uniform(0, 30)
c = persalys.Input('c', 16, dist_c, '')
z1 = persalys.Output('z1', '')
z2 = persalys.Output('z2', 'fake output')
inputs = [z0, v0, m, c]
outputs = [z1, z2]

# mesh model
meshModel = persalys.GridMeshModel(ot.Interval(0., 12.), [20])

# Python model
code = 'from math import exp\n\ndef _exec(z0,v0,m,c):\n    g = 9.81\n    zmin = 0.\n    tau = m / c\n    vinf = -m * g / c\n\n    # mesh nodes\n    t = getMesh().getVertices()\n\n    z = [max(z0 + vinf * t_i[0] + tau * (v0 - vinf) * (1 - exp(-t_i[0] / tau)), zmin) for t_i in t]\n    z2 = [2*max(z0 + vinf * t_i[0] + tau * (v0 - vinf) * (1 - exp(-t_i[0] / tau)), zmin) for t_i in t]\n\n    return z, z2'
PhysicalModel_1 = persalys.PythonFieldModel('PhysicalModel_1', meshModel, inputs, outputs, code)
Study_0.add(PhysicalModel_1)

# central tendency
Example #18
graph = distribution.drawPDF()
graph.setTitle("Bivariate Student PDF")
view = otv.View(graph)

# %%
# The UserDefined distribution
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# We can also define our own distribution with the :class:`~openturns.UserDefined` distribution.
# For instance consider the square :math:`[-1,1] \times [-1, 1]` with some
# random points uniformly drawn. For each point the weight chosen is the square
# of the distance to the origin. The :class:`~openturns.UserDefined` class normalizes the weights.

# %%
# We first generate random points in the square.
distUniform2 = ot.ComposedDistribution([ot.Uniform(-1.0, 1.0)] * 2)
N = 100
sample = distUniform2.getSample(N)

# %%
# We then build the points and weights for the `UserDefined` distribution.
points = []
weights = []
for i in range(N):
    points.append(sample[i, :])
    weights.append((sample[i, 0]**2 + sample[i, 1]**2)**2)

# %%
# We build the distribution :
distribution = ot.UserDefined(points, weights)
graph = distribution.drawPDF()
Example #19
# We first define the time grid associated with the model.

# %%
tmin = 0.0  # Minimum time
tmax = 12.  # Maximum time
gridsize = 100  # Number of time steps
mesh = ot.IntervalMesher([gridsize - 1]).build(ot.Interval(tmin, tmax))

# %%
vertices = mesh.getVertices()

# %%
# Creation of the input distribution.

# %%
distZ0 = ot.Uniform(100.0, 150.0)
distV0 = ot.Normal(55.0, 10.0)
distM = ot.Normal(80.0, 8.0)
distC = ot.Uniform(0.0, 30.0)
distribution = ot.ComposedDistribution([distZ0, distV0, distM, distC])

# %%
dimension = distribution.getDimension()
dimension

# %%
# Then we define the Python function which computes the altitude at each time value. In order to compute all the altitudes with a vectorized evaluation, we first convert the vertices into a Numpy `array` and use the Numpy functions `exp` and `maximum`: this increases the evaluation performance of the script.


# %%
import numpy as np


def AltiFunc(X):
    # body reconstructed from the description above and from the closed-form
    # free-fall-with-drag solution used earlier in this document
    g = 9.81
    z0, v0, m, c = X
    tau = m / c
    vinf = -m * g / c
    t = np.array(vertices)
    z = z0 + vinf * t + tau * (v0 - vinf) * (1.0 - np.exp(-t / tau))
    z = np.maximum(z, 0.0)
    return [[zeta[0]] for zeta in z]
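
The natural next step (a hedged sketch; the original snippet stops at the function definition) is to wrap this Python function into a point-to-field function over the mesh:

# 4 scalar inputs, a 1-d field defined on `mesh` as output
altitudeField = ot.PythonPointToFieldFunction(4, mesh, 1, AltiFunc)
# evaluate one trajectory, e.g. at the mean input point
meanTrajectory = altitudeField(distribution.getMean())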
Example #20
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
ot.RandomGenerator.SetSeed(0)

# Generate sample with the given plane
distribution = ot.ComposedDistribution([ot.Uniform(0, 1)] * 2)
size = 10
experiment = ot.LHSExperiment(distribution, size)

sample = experiment.generate()

# Create an empty graph
graph = ot.Graph("LHS experiment", "x1", "x2", True, "")

# Create the cloud
cloud = ot.Cloud(sample, "blue", "fsquare", "")

# Then, draw it
graph.add(cloud)

fig = plt.figure(figsize=(4, 4))
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
View(graph, figure=fig, axes=[axis], add_legend=False)
Example #21
import os

import openturns as ot
import otrobopt

fileName = 'myStudy.xml'

# Create a Study Object
myStudy = ot.Study()
myStudy.setStorageManager(ot.XMLStorageManager(fileName))

thetaDist = ot.Normal(2.0, 0.1)
f_base = ot.SymbolicFunction(['x', 'theta'], ['x*theta'])
f = ot.ParametricFunction(f_base, [1], [1.0])

measures = [
    otrobopt.MeanMeasure(f, thetaDist),
    otrobopt.VarianceMeasure(f, thetaDist),
    otrobopt.WorstCaseMeasure(f, ot.Uniform(-1.0, 4.0), False),
    otrobopt.JointChanceMeasure(f, ot.Normal(1.0, 1.0), ot.GreaterOrEqual(),
                                0.95),
    otrobopt.IndividualChanceMeasure(f, ot.Normal(1.0, 1.0),
                                     ot.GreaterOrEqual(), [0.95]),
    otrobopt.MeanStandardDeviationTradeoffMeasure(f, thetaDist, [0.8]),
    otrobopt.QuantileMeasure(f, thetaDist, 0.99)
]
aggregated = otrobopt.AggregatedMeasure(measures)
measures.append(aggregated)

for measure in measures:
    myStudy.add('measure' + measure.__class__.__name__, measure)

measure2 = otrobopt.MeanMeasure(f, thetaDist)
measureFunction = otrobopt.MeasureFunction(measure2)
Example #22
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
if ot.Uniform().__class__.__name__ == 'ComposedDistribution':
    correlation = ot.CorrelationMatrix(2)
    correlation[1, 0] = 0.25
    aCopula = ot.NormalCopula(correlation)
    marginals = [ot.Normal(1.0, 2.0), ot.Normal(2.0, 3.0)]
    distribution = ot.ComposedDistribution(marginals, aCopula)
elif ot.Uniform().__class__.__name__ == 'CumulativeDistributionNetwork':
    distribution = ot.CumulativeDistributionNetwork(
        [ot.Normal(2), ot.Dirichlet([0.5, 1.0, 1.5])],
        ot.BipartiteGraph([[0, 1], [0, 1]]))
elif ot.Uniform().__class__.__name__ == 'Histogram':
    distribution = ot.Histogram([-1.0, 0.5, 1.0, 2.0], [0.45, 0.4, 0.15])
else:
    distribution = ot.Uniform()
dimension = distribution.getDimension()
if dimension == 1:
    distribution.setDescription(['$x$'])
    pdf_graph = distribution.drawPDF()
    cdf_graph = distribution.drawCDF()
    fig = plt.figure(figsize=(10, 4))
    plt.suptitle(str(distribution))
    pdf_axis = fig.add_subplot(121)
    cdf_axis = fig.add_subplot(122)
    View(pdf_graph, figure=fig, axes=[pdf_axis], add_legend=False)
    View(cdf_graph, figure=fig, axes=[cdf_axis], add_legend=False)
elif dimension == 2:
    distribution.setDescription(['$x_1$', '$x_2$'])
    pdf_graph = distribution.drawPDF()
    fig = plt.figure(figsize=(10, 5))
    plt.suptitle(str(distribution))
Example #23
#   
# with Theta obtained from the random variable Y through a function f
#
# .. math:: 
#    \underline{\Theta}=f(\underline{Y})
#

# %%
from __future__ import print_function
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt

# %%
# create the Y distribution
YDist = ot.Uniform(-1.0, 1.0)

# %%
# create Theta=f(y)
f = ot.SymbolicFunction(['y'], ['y', '1+y^2'])

# %%
# create the X|Theta distribution
XgivenThetaDist = ot.Uniform()

# %%
# create the distribution
XDist = ot.ConditionalDistribution(XgivenThetaDist, YDist, f)
XDist.setDescription(['X|Theta=f(y)'])
XDist
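
A short hedged check (not in the original snippet): the resulting distribution is one-dimensional, so we can sample it and draw its PDF with the viewer module imported above.

# sample the conditional distribution and draw its PDF
sample = XDist.getSample(5)
print(sample)
view = viewer.View(XDist.drawPDF())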
Example #24
import sys

import openturns as ot

# the imports and the list opener are reconstructed: the original snippet
# starts inside the `grids` list
grids = [
    ot.RegularGrid(-1.0, -0.1, 20),
    ot.RegularGrid(-1.0, 0.13, 20),
    ot.RegularGrid(1.0, -0.13, 20)
]

for regularGrid in grids:
    lowerBound = regularGrid.getLowerBound()[0]
    upperBound = regularGrid.getUpperBound()[0]
    n = regularGrid.getSimplicesNumber()
    print("regularGrid=", regularGrid, "lowerBound=", lowerBound,
          "upperBound=", upperBound, n, "simplices")
    algo = ot.RegularGridEnclosingSimplex(regularGrid)

    ot.RandomGenerator.SetSeed(0)
    test = ot.Sample(
        ot.Uniform(lowerBound - 0.2 * (upperBound - lowerBound), upperBound +
                   0.2 * (upperBound - lowerBound)).getSample(1000))

    vertices = regularGrid.getVertices()
    for vertex in test:
        index = algo.query(vertex)
        x = vertex[0]
        if x < lowerBound or x > upperBound:
            if index < n:
                print("Point", x, "should be outside but query returned index",
                      index)
                sys.exit(1)
        else:
            if index >= n:
                print("Point", x, "should be inside but query returned index",
                      index)
                sys.exit(1)
Example #25
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
if ot.KPermutationsDistribution().__class__.__name__ == 'Bernoulli':
    distribution = ot.Bernoulli(0.7)
elif ot.KPermutationsDistribution().__class__.__name__ == 'Binomial':
    distribution = ot.Binomial(5, 0.2)
elif ot.KPermutationsDistribution(
).__class__.__name__ == 'ComposedDistribution':
    copula = ot.IndependentCopula(2)
    marginals = [ot.Uniform(1.0, 2.0), ot.Normal(2.0, 3.0)]
    distribution = ot.ComposedDistribution(marginals, copula)
elif ot.KPermutationsDistribution(
).__class__.__name__ == 'CumulativeDistributionNetwork':
    coll = [ot.Normal(2), ot.Dirichlet([0.5, 1.0, 1.5])]
    distribution = ot.CumulativeDistributionNetwork(
        coll, ot.BipartiteGraph([[0, 1], [0, 1]]))
elif ot.KPermutationsDistribution().__class__.__name__ == 'Histogram':
    distribution = ot.Histogram([-1.0, 0.5, 1.0, 2.0], [0.45, 0.4, 0.15])
elif ot.KPermutationsDistribution().__class__.__name__ == 'KernelMixture':
    kernel = ot.Uniform()
    sample = ot.Normal().getSample(5)
    bandwidth = [1.0]
    distribution = ot.KernelMixture(kernel, bandwidth, sample)
elif ot.KPermutationsDistribution(
).__class__.__name__ == 'MaximumDistribution':
    coll = [
        ot.Uniform(2.5, 3.5),
        ot.LogUniform(1.0, 1.2),
        ot.Triangular(2.0, 3.0, 4.0)
    ]
Example #26
import openturns as ot
from matplotlib import pyplot as plt


def flooding(X):
    # the function header and the constants below are reconstructed; the
    # original snippet starts mid-function, so L, Zb and Hd are assumed
    # from the classical OpenTURNS flood use case
    L = 5.0e3   # length of the river reach (m), assumed
    Zb = 55.5   # river bank level (m), assumed
    Hd = 3.0    # dyke height (m), assumed
    B = 300.0
    Zd = Zb + Hd
    Q, Ks, Zv, Zm = X
    alpha = (Zm - Zv) / L
    H = (Q / (Ks * B * alpha**0.5))**0.6
    Zc = H + Zv
    S = Zc - Zd
    return [S]


myFunction = ot.PythonFunction(4, 1, flooding)
Q = ot.Gumbel(558.0, 1013.0)
Q = ot.TruncatedDistribution(Q, 0.0, ot.SpecFunc.MaxScalar)
Ks = ot.Normal(30.0, 7.5)
Ks = ot.TruncatedDistribution(Ks, 0.0, ot.SpecFunc.MaxScalar)
Zv = ot.Uniform(49.0, 51.0)
Zm = ot.Uniform(54.0, 56.0)
inputX = ot.ComposedDistribution([Q, Ks, Zv, Zm])
inputX.setDescription(["Q", "Ks", "Zv", "Zm"])

size = 5000
computeSO = True
inputDesign = ot.SobolIndicesExperiment(inputX, size, computeSO).generate()
outputDesign = myFunction(inputDesign)
sensitivityAnalysis = ot.SaltelliSensitivityAlgorithm(inputDesign,
                                                      outputDesign, size)

graph = sensitivityAnalysis.draw()

fig = plt.figure(figsize=(8, 4))
axis = fig.add_subplot(111)
Example #27
basis = ot.ConstantBasisFactory(dim).build()
print(basis)

# 2. covariance model
cov = ot.MaternModel([1.], [2.5], 1.5)
print(cov)

# 3. kriging algorithm
algokriging = ot.KrigingAlgorithm(x, y, cov, basis)

# error measure
# algokriging.setNoise([5*1e-1]*n_pt)

# 4. Optimization
# algokriging.setOptimizationAlgorithm(ot.NLopt('GN_DIRECT'))
lhsExperiment = ot.LHSExperiment(ot.Uniform(1e-1, 1e2), 50)
algokriging.setOptimizationAlgorithm(
    ot.MultiStart(ot.TNC(), lhsExperiment.generate()))
algokriging.setOptimizationBounds(ot.Interval([0.1], [1e2]))

# if we choose not to optimize parameters
# algokriging.setOptimizeParameters(False)

# 5. run the algorithm
algokriging.run()

# %%
# Results
# -------

# %%
Example #28
import openturns as ot
from openturns.viewer import View

# Sobol
d = ot.LowDiscrepancyExperiment(ot.SobolSequence(), ot.ComposedDistribution([ot.Uniform()]*3), 32)
s = d.generate()
s.setDescription(["X1", "X2", "X3"])
g = ot.Graph()
g.setTitle("Low discrepancy experiment")
g.setGridColor("black")
p = ot.Pairs(s)
g.add(p)
View(g)
Example #29
graph = model.draw(lowerbound, upperbound, [100] * dim)
graph.setTitle("Ackley function")
view = viewer.View(graph)

# %%
# We see that the Ackley function has many local minima. The global minimum, however, is unique and located at the center of the domain.

# %%
# Create the initial kriging
# ^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Before using the EGO algorithm, we must create an initial kriging model. To do this, we must create a design of experiments which fills the space. In this situation, the `LHSExperiment` is a good place to start (though other designs of experiments may fill the space better). We use a uniform distribution in order to create an LHS design with 50 points.

# %%
listUniformDistributions = [
    ot.Uniform(lowerbound[i], upperbound[i]) for i in range(dim)
]
distribution = ot.ComposedDistribution(listUniformDistributions)
sampleSize = 50
experiment = ot.LHSExperiment(distribution, sampleSize)
inputSample = experiment.generate()
outputSample = model(inputSample)

# %%
graph = ot.Graph("Initial LHS design of experiment - n=%d" % (sampleSize),
                 "$x_0$", "$x_1$", True)
cloud = ot.Cloud(inputSample)
graph.add(cloud)
view = viewer.View(graph)

Example #30
# %%
# In this example we are going to build a conditional random vector
#
# .. math::
#    \underline{X}|\underline{\Theta}
#

# %%
from __future__ import print_function
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt

# %%
# create the random vector Theta (parameters of X)
gammaDist = ot.Uniform(1.0, 2.0)
alphaDist = ot.Uniform(0.0, 0.1)
thetaDist = ot.ComposedDistribution([gammaDist, alphaDist])
thetaRV = ot.RandomVector(thetaDist)

# %%
# create the XgivenTheta distribution
XgivenThetaDist = ot.Exponential()

# %%
# create the X distribution
XDist = ot.ConditionalRandomVector(XgivenThetaDist, thetaRV)

# %%
# draw a sample
XDist.getSample(5)