w2 = 1.0 - w1
distribution1 = ot.Normal(0.0, 1.0)
distribution2 = ot.Normal(1.5, 1.0 / 3.0)

# %%
# We generate two independent sub-samples from the two Normal distributions.

# %%
sample1 = distribution1.getSample(int(w1 * n))
sample2 = distribution2.getSample(int(w2 * n))

# %%
# Then we merge the sub-samples into a larger one with the `add` method of the `Sample` class. 

# %%
sample = ot.Sample(sample1)
sample.add(sample2)
sample.getSize()

# %%
# To see the result, we build a kernel smoothing approximation of the sample. To keep things simple, we use the default bandwidth selection rule.

# %%
factory = ot.KernelSmoothing()
fit = factory.build(sample)

# %%
graph = fit.drawPDF()
view = otv.View(graph)
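
# %%
# As a sanity check (an addition, assuming the weights above), we overlay the exact mixture PDF, built with the `Mixture` class, on the kernel smoothing estimate.

# %%
exact = ot.Mixture([distribution1, distribution2], [w1, w2])
graph = fit.drawPDF()
graph.add(exact.drawPDF())
view = otv.View(graph)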

# %%
Example #2
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

basisSize = 3
sampleSize = 3

X = ot.Sample(sampleSize, 1)
for i in range(sampleSize):
    X[i, 0] = i + 1.0

Y = ot.Sample(sampleSize, 1)

phis = []
for j in range(basisSize):
    phis.append(ot.SymbolicFunction(['x'], ['x^' + str(j + 1)]))
basis = ot.Basis(phis)
for i in range(basisSize):
    print(ot.FunctionCollection(basis)[i](X))

proxy = ot.DesignProxy(X, basis)
full = range(basisSize)

design = proxy.computeDesign(full)
print(design)

proxy.setWeight([0.5] * sampleSize)
design = proxy.computeDesign(full)
print(design)
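
# Cross-check (an added sketch): entry (i, j) of the design matrix is the
# value of the j-th basis function at the i-th input point.
manual = ot.Matrix(sampleSize, basisSize)
for i in range(sampleSize):
    for j in range(basisSize):
        manual[i, j] = phis[j](X[i])[0]
print(manual)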
Example #3
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot
import math as m

ot.PlatformInfo.SetNumericalPrecision(6)
# 1D example
mesh1D = ot.Mesh()
print("Default 1D mesh=", mesh1D)
vertices = ot.Sample(0, 1)
vertices.add([0.5])
vertices.add([1.5])
vertices.add([2.1])
vertices.add([2.7])
simplices = [[]] * 3
simplices[0] = [0, 1]
simplices[1] = [1, 2]
simplices[2] = [2, 3]
mesh1D = ot.Mesh(vertices, simplices)
print("1D mesh=", mesh1D)
print("Is empty? ", mesh1D.isEmpty())
print("vertices=", mesh1D.getVertices())
print("simplices=", mesh1D.getSimplices())
print("volume=", mesh1D.getVolume())
print("First simplex volume=", mesh1D.computeSimplexVolume(0))
p = [1.3]
print("is p=", p, " in mesh? ", mesh1D.contains(p))
point = [1.8]
print("Nearest index(", point, ")=", mesh1D.getNearestVertexIndex(point))
print("Nearest index(", point, "), simplex and coordinates=",
Example #4
size = 100

# Number of continuous distributions
continuousDistributionNumber = continuousDistributionCollection.getSize()
# Number of discrete distributions
discreteDistributionNumber = discreteDistributionCollection.getSize()
# Number of distributions
distributionNumber = continuousDistributionNumber + \
    discreteDistributionNumber

# We create a collection of Sample of size "size" and of
# dimension 1 (scalar values) : the collection has distributionNumber
# Samples

sampleCollection = [ot.Sample(size, 1) for i in range(distributionNumber)]
# We create a collection of Sample of size "size" and of
# dimension 1 (scalar values) : the collection has
# continuousDistributionNumber Samples
continuousSampleCollection = [
    ot.Sample(size, 1) for i in range(continuousDistributionNumber)
]
# We create a collection of Sample of size "size" and of
# dimension 1 (scalar values) : the collection has
# discreteDistributionNumber Samples
discreteSampleCollection = [
    ot.Sample(size, 1) for i in range(discreteDistributionNumber)
]

ot.RandomGenerator.SetSeed(0)
for i in range(continuousDistributionNumber):
    # Plausible completion (the original fragment is cut here): fill each
    # sub-sample from its distribution.
    continuousSampleCollection[i] = \
        continuousDistributionCollection[i].getSample(size)
    sampleCollection[i] = continuousSampleCollection[i]
Example #5
#! /usr/bin/env python

import openturns as ot

ot.RandomGenerator.SetSeed(0)

size = 200

# input sample
inputSample = ot.Uniform(-1.0, 1.0).getSample(size)
outputSample = ot.Sample(inputSample)

# Evaluation of y = ax + b (a: scale, b: translate)

# scale
scale = [3.0]
outputSample *= scale

# translate sample
translate = [3.1]
outputSample += translate

# Finally inverse transform using an arbitrary lambda
lamb = [1.8]
boxCoxFunction = ot.InverseBoxCoxEvaluation(lamb)

# transform y using BoxCox function
outputSample = boxCoxFunction(outputSample)

# Add small noise
epsilon = ot.Normal(0, 1.0e-2).getSample(size)
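
# Plausible continuation (the fragment stops here): perturb the output with
# the noise, then estimate the Box-Cox parameter back with BoxCoxFactory.
outputSample = outputSample + epsilon
myBoxCox = ot.BoxCoxFactory().build(outputSample)
print(myBoxCox)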
Example #6
inputSample = ot.Sample([[0.608202, -0.632279], [-1.26617, -0.983758],
                         [-0.438266, -0.567843], [1.20548, 0.196576],
                         [-2.18139, 0.297528], [0.350042, 0.712356],
                         [-0.355007, -0.833663], [1.43725, -0.0922891],
                         [0.810668, 0.0103994], [0.793156, -0.959137],
                         [-0.470526, -0.653801], [0.261018, -0.629697],
                         [-2.29006, 0.691573], [-1.28289, 0.00556723],
                         [-1.31178, -0.456731], [-0.0907838, 0.282551],
                         [0.995793, 0.127445], [-0.139453, -0.889423],
                         [-0.560206, 0.602221], [0.44549, -0.708005],
                         [0.322925, 0.560746], [0.445785, 0.210698],
                         [-1.03808, -0.231704], [-0.856712, -0.0757071],
                         [0.473617, -0.747651], [-0.125498, -0.717586],
                         [0.351418, 0.533389], [1.78236, -0.601014],
                         [0.0702074, -0.283657], [-0.781366, 0.421392],
                         [-0.721533, 0.97622], [-0.241223, 0.649341],
                         [-1.78796, -0.447644], [0.40136, 0.524658],
                         [1.36783, -0.310752], [1.00434, 0.466132],
                         [0.741548, 0.318871], [-0.0436123, 0.45667],
                         [0.539345, 0.419467], [0.29995, -0.785465],
                         [0.407717, -0.957343], [-0.485112, -0.888291],
                         [-0.382992, -0.13238], [-0.752817, 0.881545],
                         [0.257926, 0.230244], [1.96876, -0.408034],
                         [-0.671291, 0.74598], [1.85579, -0.624525],
                         [0.0521593, 0.790653], [0.790446, 0.359935],
                         [0.716353, 0.868061], [-0.743622, -0.28315],
                         [0.184356, -0.605466], [-1.53073, 0.975779],
                         [0.655027, 0.415187], [0.538071, 0.0840439],
                         [1.73821, -0.278904], [-0.958722, 0.803063],
                         [0.377922, 0.745595], [-0.181004, -0.359175],
                         [1.67297, 0.992755], [-1.03896, -0.376385],
                         [-0.353552, -0.558697], [1.21381, -0.300297],
                         [-0.777033, 0.898571], [-1.36853, -0.904539],
                         [0.103474, -0.841734], [-0.89182, -0.878618],
                         [0.905602, -0.904866], [0.334794, -0.209856],
                         [-0.483642, 0.742799], [0.677958, -0.984481],
                         [1.70938, 0.926643], [1.07062, -0.699162],
                         [-0.506925, 0.536564], [-1.66086, 0.0400396],
                         [2.24623, -0.325119], [0.759602, 0.639517],
                         [-0.510764, -0.182467], [-0.633066, -0.975622],
                         [-0.957072, 0.00729759], [0.544047, -0.411715],
                         [0.814561, -0.540526], [-0.734708, 0.685758],
                         [-0.111461, 0.252678], [0.994482, -0.765926],
                         [-0.160625, -0.991518], [-0.938771, -0.808189],
                         [-1.96869, -0.171358], [-0.657603, -0.516637],
                         [0.338751, 0.396634], [1.01556, 0.586587],
                         [0.637167, -0.369344], [-0.0899071, 0.0029418],
                         [-0.855886, 0.866255], [1.27128, 0.942399],
                         [-0.238253, -0.569602], [1.3263, -0.27135],
                         [2.11968, 0.427897], [-0.901581, 0.276064]])
Example #7
for i in range(len(levels)):
    contour.setLevels([levels[i]])
    # Inline the level values
    contour.setDrawLabels(True)
    # We have to copy the drawable because a Python list stores only pointers
    drawables.append(ot.Drawable(contour))

graphFineTune = ot.Graph("The exact Branin model", r"$x_1$", r"$x_2$", True,
                         '')
graphFineTune.setDrawables(drawables)  # Replace the drawables
graphFineTune.setLegendPosition("")  # Remove the legend
graphFineTune.setColors(palette)  # Add colors

# %%
# We also represent the three minima of the Branin function with orange diamonds:
sample1 = ot.Sample([bm.xexact1, bm.xexact2, bm.xexact3])
cloud1 = ot.Cloud(sample1, 'orange', 'diamond', 'First Cloud')
graphFineTune.add(cloud1)
view = otv.View(graphFineTune)

# %%
# The values of the exact model at these points are:
print(bm.objectiveFunction(sample1))

# %%
# The Branin function has a global minimum attained at three different points. We shall build a
# metamodel of this function that presents the same behaviour.

# %%
# Definition of the Kriging metamodel
# -----------------------------------
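# %%
# A minimal sketch of this construction (an assumption; the training size and
# covariance parameters below are illustrative, not the original code):

# %%
x_train = ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * 2).getSample(30)
y_train = bm.objectiveFunction(x_train)
basis = ot.ConstantBasisFactory(2).build()
covarianceModel = ot.SquaredExponential([0.1] * 2, [1.0])
algo = ot.KrigingAlgorithm(x_train, y_train, covarianceModel, basis)
algo.run()
krigingMetamodel = algo.getResult().getMetaModel()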
Example #8
# -*- coding: utf-8 -*-
# Copyright (C) 2016 - Michael Baudin

import openturns as ot
from numpy import array, arange
'''
Logistic Growth

Reference
Differential equations, 4th ed., Braun, 1993, 
TAM  Chap.1, "First order differential equations"   
p. 28
'''
# Real data (US population)
ustime = arange(1790, 2001, 10)
uspop = array([3.9, 5.3, 7.2, 9.6, 13., 17., 23., 31., 39.,
               50., 62., 76., 92., 106., 123., 132., 151., 179.,
               203., 221., 250., 281.])

sampleSize = len(ustime)

observedSample = ot.Sample(sampleSize, 2)
observedSample.setDescription(["Date (Years)", "Population (Millions)"])
observedSample[:, 0] = ustime.reshape((sampleSize, 1))
observedSample[:, 1] = uspop.reshape((sampleSize, 1))

observedSample.exportToCSVFile("calage-logistique-observations.csv")
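
# The data above is typically calibrated against the closed-form logistic
# growth curve; a sketch of the model (an assumption, with t0 = 1790 and y0
# the 1790 population):
logisticModel = ot.SymbolicFunction(
    ["t", "y0", "a", "b"],
    ["a * y0 / (b * y0 + (a - b * y0) * exp(-a * (t - 1790.0)))"])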
Example #9
import otfmi.example.utility

path_fmu = otfmi.example.utility.get_path_fmu("deviation")

# %%
# Wrap the FMU into an OpenTURNS function:

function = otfmi.FMUFunction(path_fmu,
                             inputs_fmu=["E", "F", "L", "I"],
                             outputs_fmu=["y"])
print(type(function))

# %%
# Simulate the FMU on a point:

import openturns as ot

inputPoint = ot.Point([3.0e7, 30000, 200, 400])
outputPoint = function(inputPoint)
print("y = {}".format(outputPoint))

# %%
# Simulate the FMU on a sample:

inputSample = ot.Sample([[3.0e7, 30000, 200, 400], [3.0e7, 30000, 250, 400],
                         [3.0e7, 30000, 300, 400]])
inputSample.setDescription(["E", "F", "L", "I"])

outputSample = function(inputSample)
print(outputSample)
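# %%
# A typical next step (a sketch, not in the original snippet): propagate an
# input distribution through the FMU-based function. The distributions below
# are illustrative.

# %%
inputDistribution = ot.ComposedDistribution([
    ot.Normal(3.0e7, 3.0e6),
    ot.Normal(30000.0, 3000.0),
    ot.Uniform(200.0, 300.0),
    ot.Uniform(350.0, 450.0)])
inputRV = ot.RandomVector(inputDistribution)
outputRV = ot.CompositeRandomVector(function, inputRV)
print(outputRV.getSample(5))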
Example #10
inputRandomVector = ot.ComposedDistribution([Theta1, Theta2, Theta3])

candidate = ot.Point([8.0, 9.0, -6.0])

calibratedIndices = [0, 1, 2]
model = ot.ParametricFunction(g, calibratedIndices, candidate)

outputObservationNoiseSigma = 0.05
meanNoise = ot.Point(outputDimension)
covarianceNoise = ot.Point(outputDimension, outputObservationNoiseSigma)
R = ot.IdentityMatrix(outputDimension)
observationOutputNoise = ot.Normal(meanNoise, covarianceNoise, R)

size = 1000
inputObservations = ot.Sample(size, 0)

# Generate exact outputs
inputSample = inputRandomVector.getSample(size)
outputStress = g(inputSample)
# Add noise
sampleNoise = observationOutputNoise.getSample(size)
outputObservations = outputStress + sampleNoise
# Calibrate
algo = ot.LinearLeastSquaresCalibration(model, inputObservations,
                                        outputObservations, candidate, "SVD")
algo.run()
calibrationResult = algo.getResult()

# Check residual distribution
residualDistribution = calibrationResult.getObservationsError()
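
# Quick sanity check (an addition, not in the original fragment): the
# estimated observation-error scale should be close to the injected noise level.
print("estimated sigma=", residualDistribution.getStandardDeviation())
print("injected sigma=", outputObservationNoiseSigma)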
Example #11
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot
from math import sin

ot.TESTPREAMBLE()

try:

    # lm build
    print(
        "Fit y ~ 3 - 2 x + 0.05 * sin(x) model using 20 points (sin(x) ~ noise)"
    )
    size = 20
    oneSample = ot.Sample(size, 1)
    twoSample = ot.Sample(size, 1)
    for i in range(size):
        oneSample[i, 0] = 7.0 * sin(-3.5 + (6.5 * i) / (size - 1.0)) + 2.0
        twoSample[i, 0] = -2.0 * oneSample[i, 0] + \
            3.0 + 0.05 * sin(oneSample[i, 0])

    test = ot.LinearModelAlgorithm(oneSample, twoSample)
    result = ot.LinearModelResult(test.getResult())
    print("trend coefficients = ", result.getTrendCoefficients())

    print("Fit y ~ 1 + 0.1 x + 10 x^2 model using 100 points")
    ot.RandomGenerator.SetSeed(0)
    size = 100
    # Define a linspace from 0 to 10 with size points
    # We use a Box experiment ==> remove the 0 and 1 points
Example #12
ot.ResourceMap.SetAsBool(
    'GeneralLinearModelAlgorithm-UseAnalyticalAmplitudeEstimate', False)

inputSample = ot.Sample(
    [[4.59626812e+00, 7.46143339e-02, 1.02231538e+00, 8.60042277e+01],
     [4.14315790e+00, 4.20801346e-02, 1.05874908e+00, 2.65757364e+01],
     [4.76735111e+00, 3.72414824e-02, 1.05730385e+00, 5.76058433e+01],
     [4.82811977e+00, 2.49997658e-02, 1.06954641e+00, 2.54461380e+01],
     [4.48961094e+00, 3.74562922e-02, 1.04943946e+00, 6.19483646e+00],
     [5.05605334e+00, 4.87599783e-02, 1.06520409e+00, 3.39024904e+00],
     [5.69679328e+00, 7.74915877e-02, 1.04099514e+00, 6.50990466e+01],
     [5.10193991e+00, 4.35520544e-02, 1.02502536e+00, 5.51492592e+01],
     [4.04791970e+00, 2.38565932e-02, 1.01906882e+00, 2.07875350e+01],
     [4.66238956e+00, 5.49901237e-02, 1.02427200e+00, 1.45661275e+01],
     [4.86634219e+00, 6.04693570e-02, 1.08199374e+00, 1.05104730e+00],
     [4.13519347e+00, 4.45225831e-02, 1.01900124e+00, 5.10117047e+01],
     [4.92541940e+00, 7.87692335e-02, 9.91868726e-01, 8.32302238e+01],
     [4.70722074e+00, 6.51799251e-02, 1.10608515e+00, 3.30181002e+01],
     [4.29040932e+00, 1.75426222e-02, 9.75678838e-01, 2.28186756e+01],
     [4.89291400e+00, 2.34997929e-02, 1.07669835e+00, 5.38926138e+01],
     [4.44653744e+00, 7.63175936e-02, 1.06979154e+00, 5.19109415e+01],
     [3.99977452e+00, 5.80430585e-02, 1.01850716e+00, 7.61988190e+01],
     [3.95491570e+00, 1.09302814e-02, 1.03687664e+00, 6.09981789e+01],
     [5.16424368e+00, 2.69026464e-02, 1.06673711e+00, 2.88708887e+01],
     [5.30491620e+00, 4.53802273e-02, 1.06254792e+00, 3.03856837e+01],
     [4.92809155e+00, 1.20616369e-02, 1.00700410e+00, 7.02512744e+00],
     [4.68373805e+00, 6.26028935e-02, 1.05152117e+00, 4.81271603e+01],
     [5.32381954e+00, 4.33013582e-02, 9.90522007e-01, 6.56015973e+01],
     [4.35455857e+00, 1.23814619e-02, 1.01810539e+00, 1.10769534e+01]])

signals = ot.Sample([[37.305445], [35.466919], [43.187991], [45.305165],
                     [40.121222], [44.609524], [45.14552], [44.80595],
Example #13
# Imports required by this fragment (not shown in the original).
import openturns as ot
import openturns.testing as ott


def test_one_input_one_output():
    sampleSize = 6
    dimension = 1

    f = ot.SymbolicFunction(['x0'], ['x0 * sin(x0)'])

    X = ot.Sample(sampleSize, dimension)
    X2 = ot.Sample(sampleSize, dimension)
    for i in range(sampleSize):
        X[i, 0] = 3.0 + i
        X2[i, 0] = 2.5 + i
    X[0, 0] = 1.0
    X[1, 0] = 3.0
    X2[0, 0] = 2.0
    X2[1, 0] = 4.0
    Y = f(X)
    Y2 = f(X2)

    # create covariance model
    basis = ot.ConstantBasisFactory(dimension).build()
    covarianceModel = ot.SquaredExponential()

    # create algorithm
    algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis)

    # set sensible optimization bounds and estimate hyperparameters
    algo.setOptimizationBounds(ot.Interval(X.getMin(), X.getMax()))
    algo.run()

    # perform an evaluation
    result = algo.getResult()

    ott.assert_almost_equal(result.getMetaModel()(X), Y)
    ott.assert_almost_equal(result.getResiduals(), [1.32804e-07], 1e-3, 1e-3)
    ott.assert_almost_equal(result.getRelativeErrors(), [5.20873e-21])

    # Kriging variance is 0 on learning points
    covariance = result.getConditionalCovariance(X)
    nullMatrix = ot.Matrix(sampleSize, sampleSize)
    ott.assert_almost_equal(covariance, nullMatrix, 0.0, 1e-13)

    # Kriging variance is non-null on validation points
    validCovariance = result.getConditionalCovariance(X2)
    values = ot.Matrix(
        [[0.81942182, -0.35599947, -0.17488593, 0.04622401, -0.03143555, 0.04054783],
         [-0.35599947, 0.20874735, 0.10943841, -0.03236419, 0.02397483, -0.03269184],
         [-0.17488593, 0.10943841, 0.05832917, -0.01779918, 0.01355719, -0.01891618],
         [0.04622401, -0.03236419, -0.01779918, 0.00578327, -0.00467674, 0.00688697],
         [-0.03143555, 0.02397483, 0.01355719, -0.00467674, 0.0040267, -0.00631173],
         [0.04054783, -0.03269184, -0.01891618, 0.00688697, -0.00631173, 0.01059488]])
    ott.assert_almost_equal(validCovariance - values, nullMatrix, 0.0, 1e-7)

    # Covariance per marginal & extract variance component
    coll = result.getConditionalMarginalCovariance(X)
    var = [mat[0, 0] for mat in coll]
    ott.assert_almost_equal(var, [0] * sampleSize, 1e-14, 1e-13)

    # Variance per marginal
    var = result.getConditionalMarginalVariance(X)
    ott.assert_almost_equal(var, ot.Point(sampleSize), 1e-14, 1e-13)

    # Prediction accuracy
    ott.assert_almost_equal(Y2, result.getMetaModel()(X2), 0.3, 0.0)
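
# Run the check directly (assumes the imports added above).
test_one_input_one_output()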
Example #14
linearProfile = ot.LinearProfile(T0, iMax)

# 2) Simulated Annealing LHS with linear temperature profile, PhiP optimization
optimalLHSAlgorithm = ot.SimulatedAnnealingLHS(
    lhs, linearProfile, spaceFillingPhiP)
print("lhs=", optimalLHSAlgorithm)
design = optimalLHSAlgorithm.generate()
print(
    "Generating design using linear temperature profile & PhiP criterion =", design)
result = optimalLHSAlgorithm.getResult()
print("Final criteria: C2=%f, PhiP=%f, MinDist=%f" %
      (result.getC2(), result.getPhiP(), result.getMinDist()))

# 3) Simulated Annealing LHS with geometric temperature profile, PhiP
# optimization & initial design
initialDesign = ot.Sample(design)
optimalLHSAlgorithm = ot.SimulatedAnnealingLHS(
    initialDesign, distribution, geomProfile, spaceFillingPhiP)
print("lhs=", optimalLHSAlgorithm)
print("initial design=", initialDesign)
print("PhiP=%f, C2=%f" %
      (ot.SpaceFillingPhiP().evaluate(design), ot.SpaceFillingC2().evaluate(design)))
design = optimalLHSAlgorithm.generate()
print(
    "Generating design using geometric temperature profile & PhiP criterion =", design)
result = optimalLHSAlgorithm.getResult()
print("Final criteria: C2=%f, PhiP=%f, MinDist=%f" %
      (result.getC2(), result.getPhiP(), result.getMinDist()))
# 4) Simulated Annealing LHS with linear temperature profile, PhiP
# optimization and nStart > 1
nStart = 10
Example #15

# Imports required by this fragment (not shown in the original); x1 and x2,
# the input columns, are also defined earlier in the original script.
import numpy as np
import openturns as ot

z = np.array([77.88,
              71.03,
              27.97,
              63.41,
              57.76,
              64.63,
              33.54,
              65.64,
              92.53,
              67.06])


x = np.column_stack((x1, x2))
X = ot.Sample(x)
z = ot.Sample(np.reshape(z, (len(z), 1)))


dimension = len(x[0])

basis = ot.ConstantBasisFactory(dimension).build()
# basis = ot.LinearBasisFactory(dimension).build()
# basis = ot.QuadraticBasisFactory(dimension).build()

covarianceModel = ot.SquaredExponential([10], [1.895])
# covarianceModel = ot.MaternModel()

algo = ot.KrigingAlgorithm(X, z, covarianceModel, basis)
# algo.setNoise([0.2]*len(z)) # nugget
algo.run()
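
# Inspect the fit (a brief sketch, not in the original fragment).
result = algo.getResult()
print(result.getTrendCoefficients())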
Example #16
    algo.run()
    calibrationResult = algo.getResult()

    # Analysis of the results
    # Maximum A Posteriori estimator
    thetaMAP = calibrationResult.getParameterMAP()
    exactTheta = ot.Point([5.69186, 0.0832132, 0.992301])
    rtol = 1.0e-2
    assert_almost_equal(thetaMAP, exactTheta, rtol)

    # Covariance matrix of theta
    thetaPosterior = calibrationResult.getParameterPosterior()
    covarianceThetaStar = matrixToSample(thetaPosterior.getCovariance())
    exactCovarianceTheta = ot.Sample([
        [0.308302, -0.000665387, 6.81135e-05],
        [-0.000665387, 8.36243e-06, -8.86775e-07],
        [6.81135e-05, -8.86775e-07, 9.42234e-08],
    ])
    assert_almost_equal(covarianceThetaStar, exactCovarianceTheta)

    # Check other fields
    print("result=", calibrationResult)

    # 2. Check with global error covariance
    print("Global error covariance")
    algo = ot.GaussianLinearCalibration(model, x, y, candidate,
                                        priorCovariance, globalErrorCovariance,
                                        method)
    algo.run()
    calibrationResult = algo.getResult()
Example #17
    def _convert_exec_sample_ot(self, output):
        """Converts the output of the batch function passed to the class into
        a basic openturns object, and makes some checks on the dimensions.

        Note
        ----
        If the checks fail, the output can still be found under self.__output_backup__
        """
        print(
            '''Using the batch evaluation function. Assumes that the outputs are in the
same order as for the single evaluation function. This one should only
return ProcessSamples, Samples, Lists or numpy arrays.''')
        outputList = []
        if len(output) != len(self._outputDescription):
            self.__nOutputs__ = len(output)
            self.setOutputDescription(
                ot.Description.BuildDefault(self.__nOutputs__, 'Y_'))
        for i, element in enumerate(output):
            if isinstance(element, (ot.Sample, ot.ProcessSample)):
                element.setName(self._outputDescription[i])
                outputList.append(element)
                print(
                    'Element {} of the output tuple returns elements of type {} of dimension {}'
                    .format(i, element.__class__.__name__,
                            element.getDimension()))
            elif isinstance(element, (Sequence, Iterable)):
                print(
                    'Element is iterable; assuming the first dimension is the sample size'
                )
                intermElem = CustomList(element)
                intermElem.recurse2list()
                shape = intermElem.shape
                dtype = intermElem.dtype
                print('Shape is {} and dtype is {}'.format(shape, dtype))
                sampleSize = shape[0]
                subSample = [
                    CustomList(intermElem[j]) for j in range(sampleSize)
                ]
                assert dtype is not None, 'If None, the list is not homogeneous'
                if isinstance(
                        dtype(),
                    (Complex, Integral, Real, Rational, Number, str)):
                    if len(shape) >= 2:
                        print(
                            'Element {} of the output tuple returns process samples of dimension {}'
                            .format(i,
                                    len(shape) - 1))
                        mesh = self._buildMesh(self._getGridShape(shape[1:]))
                        subSample = [
                            subSample[j].flatten() for j in range(sampleSize)
                        ]
                        procsample = ot.ProcessSample(mesh, 0, len(shape) - 1)
                        for j in range(sampleSize):
                            procsample.add(
                                ot.Field(mesh,
                                         [[elem]
                                          for elem in subSample[j].data]))
                        procsample.setName(self._outputDescription[i])
                        outputList.append(procsample)
                    elif len(shape) == 1:
                        print(
                            'Element {} of the output tuple returns samples of dimension {}'
                            .format(i, 1))
                        element = ot.Sample([[dat] for dat in intermElem.data])
                        element.setName(self._outputDescription[i])
                        outputList.append(element)
                else:
                    print('Do not use non-numerical dtypes in your objects')
                    print('Wrong dtype is: ', dtype.__name__)
            elif isinstance(element, ot.Point):
                print(
                    'Element {} of the output tuple returns samples of dimension 1 (type {})'
                    .format(i, type(element).__name__))
                element = ot.Sample([[element[j]]
                                     for j in range(len(element))])
                element.setName(self._outputDescription[i])
                outputList.append(element)
            elif isinstance(element, ot.Field):
                print(
                    'ONLY _exec_sample FUNCTION MUST RETURN ot.Sample OR ot.ProcessSample OBJECTS!!'
                )
                raise TypeError
            else:
                print('Element is {} of type {}'.format(
                    element, element.__class__.__name__))
                raise NotImplementedError
        return outputList
Example #18
"""
# %%
# In this example we present how to create a design of experiments when one (or several) of the marginals is discrete.

# %%
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt

ot.Log.Show(ot.Log.NONE)

# %%
# To create the first marginal of the distribution, we select a univariate discrete distribution. Some of them, like the `Bernoulli` or `Geometric` distributions, are implemented in the library as classes. In this example however, we pick the `UserDefined` distribution that assigns equal weights to the values -2, -1, 1 and 2.

# %%
sample = ot.Sample([[-2.], [-1.], [1.], [2.]])
sample

# %%
X0 = ot.UserDefined(sample)

# %%
# For the second marginal, we pick a Gaussian distribution.

# %%
X1 = ot.Normal()

# %%
# Create the multivariate distribution from its marginals and an independent copula.

# %%
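# (The original cell is cut here; the natural one-liner, as an assumption:)
distribution = ot.ComposedDistribution([X0, X1])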
Example #19
import openturns as ot
from openturns.viewer import View

# Combinations
d = ot.Combinations(3, 12)
s = ot.Sample(d.generate())
s.setDescription(["X1", "X2", "X3"])
g = ot.Graph()
g.setTitle("Combinations generator")
g.setGridColor("black")
p = ot.Pairs(s)
g.add(p)
View(g)
Example #20
import openturns as ot
from openturns.viewer import View

ot.RandomGenerator.SetSeed(0)

dimension = 2
R = ot.CorrelationMatrix(dimension)
R[0, 1] = 0.8
distribution = ot.Normal([3.] * dimension, [2.] * dimension, R)
size = 100
sample = distribution.getSample(size)
firstSample = ot.Sample(size, 1)
secondSample = ot.Sample(size, 1)
for i in range(size):
    firstSample[i] = ot.Point(1, sample[i, 0])
    secondSample[i] = ot.Point(1, sample[i, 1])

lmtest = ot.LinearModelFactory().build(firstSample, secondSample)

drawLinearModel = ot.VisualTest.DrawLinearModel(firstSample, secondSample,
                                                lmtest)

View(drawLinearModel, figure_kwargs={'figsize': (5, 5)})
Example #21
inf_distribution = factory.build(sample)
print('estimated distribution=', inf_distribution)

# set (a,b) out of (r, t, a, b)
distribution = ot.Beta(2.3, 2.2, -1.0, 1.0)
print('distribution=', distribution)
sample = distribution.getSample(size)
factory = ot.MethodOfMomentsFactory(ot.Beta())
factory.setKnownParameter([-1.0, 1.0], [2, 3])
inf_distribution = factory.build(sample)
print('estimated distribution=', inf_distribution)

# with bounds
data = [0.6852, 0.9349, 0.5884, 1.727, 1.581,
        0.3193, -0.5701, 1.623, 2.210, -0.3440, -0.1646]
sample = ot.Sample([[x] for x in data])
size = sample.getSize()
xMin = sample.getMin()[0]
xMax = sample.getMax()[0]
delta = xMax - xMin
a = xMin - delta / (size + 2)
b = xMax + delta / (size + 2)
distribution = ot.TruncatedNormal()
factory = ot.MethodOfMomentsFactory(distribution)
factory.setKnownParameter([a, b], [2, 3])
solver = factory.getOptimizationAlgorithm()
sampleMean = sample.computeMean()[0]
sampleSigma = sample.computeStandardDeviationPerComponent()[0]
startingPoint = [sampleMean, sampleSigma]
solver.setStartingPoint(startingPoint)
factory.setOptimizationAlgorithm(solver)
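
# Plausible completion (the fragment stops here): run the estimation with the
# configured solver and starting point.
inf_distribution = factory.build(sample)
print('estimated distribution=', inf_distribution)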
Example #22
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot
import openturns.testing as ott

ot.TESTPREAMBLE()
ot.PlatformInfo.SetNumericalPrecision(3)

size = 100

# no observations
x = ot.Sample(size, 0)

g = ot.SymbolicFunction(
    ["a", "b", "c"],
    [
        "a +  -1.0  * b +  1.0  * c",
        "a +  -0.6  * b +  0.36  * c",
        "a +  -0.2  * b +  0.04  * c",
        "a +  0.2  * b +  0.04  * c",
        "a +  0.6  * b +  0.36  * c",
        "a +  1.0  * b +  1.0  * c",
    ],
)

outputDimension = g.getOutputDimension()

trueParameter = [2.8, 1.2, 0.5]
params = [0, 1, 2]
Example #23
    rnorm = r['rnorm']
    set_seed = r['set.seed']

    ## generate regressor and dependent variable
    set_seed(0)
    X1 = rnorm(n_points)
    X2 = rnorm(n_points)
    err1 = rnorm(n_points)
    output = numpy2ri(ri2py(X1) + ri2py(X1) - ri2py(X2) + ri2py(err1) + 1)
    r.assign('X1', X1)
    r.assign('X2', X2)
    r.assign('output', output)

    ## perform Durbin-Watson test
    formula = Formula('output ~ X1 + X2')
    dw_test_1 = lmtest.dwtest(formula, alternative=hypothesis['R'], exact=True)
    print('Result from R :')
    print('p-value :', dw_test_1[3][0])
    print('Alternative hypothesis : ', dw_test_1[2][0])
    print('dw stat : ', dw_test_1[0][0])
    print('')

    # transformation into openturns objects
    firstSample = ot.Sample(np.column_stack((ri2py(X1), ri2py(X2))))
    secondSample = ot.Sample(np.vstack(ri2py(output)))
    test_result = ot.LinearModelTest.LinearModelDurbinWatson(
        firstSample, secondSample, hypothesis['ot'])
    print('Result from OT :')
    print('p-value :', test_result.getPValue())
    print(test_result.getDescription()[0])
Example #24
# ```
# D[i]=max(abs(S+step),D[i])
# ```
# must be replaced with
# ```
# D[i]=max(abs(S-step),D[i])
# ```

# %%
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt
ot.Log.Show(ot.Log.NONE)

# %%
x = [0.9374, 0.7629, 0.4771, 0.5111, 0.8701, 0.0684, 0.7375, 0.5615, 0.2835, 0.2508]
sample = ot.Sample([[xi] for xi in x])

# %%
samplesize = sample.getSize()
samplesize

# %%
# Plot the empirical distribution function.

# %%
graph = ot.UserDefined(sample).drawCDF()
graph.setLegends(["Sample"])
curve = ot.Curve([0, 1], [0, 1])
curve.setLegend("Uniform")
graph.add(curve)
graph.setXTitle("X")
Example #25
ott.assert_almost_equal(myIsotropicKernel.getKernel().getAmplitude()[0],
                        amplitude, 1e-12, 0.0)
ott.assert_almost_equal(myIsotropicKernel.getKernel().getScale()[0], scale,
                        1e-12, 0.0)

# Standard tests applied
test_model(myIsotropicKernel)

# Test consistency of isotropic kernel's discretization
inputVector = ot.Point([0.3, 1.7])
inputVectorNorm = ot.Point([inputVector.norm()])
ott.assert_almost_equal(
    myOneDimensionalKernel(inputVectorNorm)[0, 0], 1.992315565746, 1e-12, 0.0)
ott.assert_almost_equal(
    myIsotropicKernel(inputVector)[0, 0], 1.992315565746, 1e-12, 0.0)
inputSample = ot.Sample([ot.Point(2), inputVector])
inputSampleNorm = ot.Sample([ot.Point(1), inputVectorNorm])
oneDimensionalCovMatrix = myOneDimensionalKernel.discretize(inputSampleNorm)
isotropicCovMatrix = myIsotropicKernel.discretize(inputSample)
ott.assert_almost_equal(oneDimensionalCovMatrix[0, 0], 2.250000000002, 1e-12,
                        0.0)
ott.assert_almost_equal(oneDimensionalCovMatrix[1, 1], 2.250000000002, 1e-12,
                        0.0)
ott.assert_almost_equal(isotropicCovMatrix[0, 0], 2.250000000002, 1e-12, 0.0)
ott.assert_almost_equal(isotropicCovMatrix[1, 1], 2.250000000002, 1e-12, 0.0)
ott.assert_almost_equal(oneDimensionalCovMatrix[0, 1], 1.992315565746, 1e-12,
                        0.0)
ott.assert_almost_equal(isotropicCovMatrix[0, 1], 1.992315565746, 1e-12, 0.0)

# Exponential covariance model
inputDimension = 2
Example #26
import openturns as ot  # import required by this fragment (not shown)

try:
    dim = 10
    R = ot.CorrelationMatrix(dim)
    for i in range(dim):
        for j in range(i):
            R[i, j] = (i + j + 1.0) / (2.0 * dim)
    mean = [2.0] * dim
    sigma = [3.0] * dim
    distribution = ot.Normal(mean, sigma, R)

    size = 100
    sample = distribution.getSample(size)
    sampleY = sample.getMarginal(0)

    sampleZ = ot.Sample(size, 1)
    for i in range(size):
        sampleZ[i, 0] = sampleY[i, 0] * sampleY[i, 0]

    discreteSample1 = ot.Poisson(0.1).getSample(size)
    discreteSample2 = ot.Geometric(0.4).getSample(size)

    # ChiSquared independence test: tests whether two samples (of possibly
    # different sizes) are independent.
    # Caution: discrete samples only
    # H0 = independent samples
    # p-value threshold: probability of the H0 rejection zone: 0.10
    # p-value: probability (test variable decision > test variable decision evaluated on the samples)
    # Test = True <=> p-value > p-value threshold
    print("ChiSquared=",
          ot.HypothesisTest.ChiSquared(discreteSample1, discreteSample2, 0.10))
    print("ChiSquared2=",
Example #27
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
ot.RandomGenerator.SetSeed(0)
factory = ot.GumbelFactory()
ref = factory.build()
dimension = ref.getDimension()
if dimension <= 2:
    sample = ref.getSample(50)
    distribution = factory.build(sample)
    if dimension == 1:
        distribution.setDescription(['$t$'])
        pdf_graph = distribution.drawPDF(256)
        cloud = ot.Cloud(sample, ot.Sample(sample.getSize(), 1))
        cloud.setColor('blue')
        cloud.setPointStyle('fcircle')
        pdf_graph.add(cloud)
        fig = plt.figure(figsize=(10, 4))
        plt.suptitle(str(distribution))
        pdf_axis = fig.add_subplot(111)
        View(pdf_graph, figure=fig, axes=[pdf_axis], add_legend=False)
    else:
        sample = ref.getSample(500)
        distribution.setDescription(['$t_0$', '$t_1$'])
        pdf_graph = distribution.drawPDF([256]*2)
        cloud = ot.Cloud(sample)
        cloud.setColor('red')
        cloud.setPointStyle('fcircle')
        pdf_graph.add(cloud)
        fig = plt.figure(figsize=(10, 4))
        plt.suptitle(str(distribution))
Example #28
# If the `with_error` boolean is `True`, then the data is computed by adding Gaussian noise to the function values.

# %%
dim = 1
xmin = 0
xmax = 10
n_pt = 20  # number of initial points
with_error = True  # whether to use generation with error

# %%
ref_func_with_error = ot.SymbolicFunction(['x', 'eps'], ['x * sin(x) + eps'])
ref_func = ot.ParametricFunction(ref_func_with_error, [1], [0.0])
x = np.vstack(np.linspace(xmin, xmax, n_pt))
ot.RandomGenerator.SetSeed(1235)
eps = ot.Normal(0, 1.5).getSample(n_pt)
X = ot.Sample(n_pt, 2)
X[:, 0] = x
X[:, 1] = eps
if with_error:
    y = np.array(ref_func_with_error(X))
else:
    y = np.array(ref_func(x))

# %%
graph = ref_func.draw(xmin, xmax, 200)
cloud = ot.Cloud(x, y)
cloud.setColor('red')
cloud.setPointStyle('bullet')
graph.add(cloud)
graph.setLegends(["Function", "Data"])
graph.setLegendPosition("topleft")
Example #29
    def getInlierSamples(self):
        indices = self.densityPlot.computeOutlierIndices(False)
        inlier_samples = np.array(self.sample)[:, indices]
        return ot.Sample(inlier_samples)
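
    # A symmetric helper (an assumption, mirroring the method above): return
    # the outliers instead of the inliers.
    def getOutlierSamples(self):
        indices = self.densityPlot.computeOutlierIndices()
        outlier_samples = np.array(self.sample)[:, indices]
        return ot.Sample(outlier_samples)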
Example #30
distA = ot.Normal(a, 0.3 * a)
distB = ot.Normal(b, 0.3 * b)
distX = ot.ComposedDistribution([distY0, distA, distB])

# Sample the model
samplesize = 1000
inputSample = distX.getSample(samplesize)
outputSample = maFonctionChamp(inputSample)
# outputSample is a ProcessSample

# Draw some trajectories
graph = outputSample.drawMarginal(0)
graph.setTitle(modeleName)
graph.setXTitle(parameterIndexName)
graph.setYTitle(fieldName)
myTrajectories = [
    ot.Drawable.ConvertFromHSV(i * (360.0 / samplesize), 1.0, 1.0)
    for i in range(len(graph.getDrawables()))
]
graph.setColors(myTrajectories)
ot.Show(graph)

# Create a sample with nodes and values
data = ot.Sample(gridsize, samplesize + 1)
data[:, 0] = mesh.getVertices()
for i in range(samplesize):
    trajectory = outputSample[i].getValues()
    data[:, i + 1] = trajectory

data.exportToCSVFile("logistic-trajectories.csv")
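
# Round-trip check (an added sketch): the exported CSV can be read back.
reloaded = ot.Sample.ImportFromCSVFile("logistic-trajectories.csv")
print(reloaded.getSize(), reloaded.getDimension())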