Example #1
import openturns as ot
from openturns.viewer import View

N = 1000
#create a sample X
dist = ot.Triangular(1.0, 5.0, 10.0)
# create a Y sample : Y = exp(X/2) + eps
eps = ot.Normal(0.0, 1.0)
sample = ot.ComposedDistribution([dist, eps]).getSample(N)
f = ot.SymbolicFunction(['x', 'eps'], ['exp(0.5*x)+eps'])
sampleY = f(sample)
sampleX = sample.getMarginal(0)
sampleX.setName('X')
# fit a linear model of Y as a function of X
regressionModel = ot.LinearModelAlgorithm(sampleX, sampleY).getResult()
graph = ot.VisualTest.DrawLinearModelResidual(sampleX, sampleY,
                                              regressionModel)
cloud = graph.getDrawable(0)
cloud.setPointStyle('times')
graph.setDrawable(cloud, 0)
graph.setTitle('')
View(graph)
Example #2
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View

if ot.ExtremeValueCopula().__class__.__name__ == 'MaximumEntropyOrderStatisticsCopula':
    marginals = [ot.Beta(1.5, 3.2, 0.0, 1.0), ot.Beta(2.0, 4.3, 0.5, 1.2)]
    copula = ot.MaximumEntropyOrderStatisticsCopula(marginals)
elif ot.ExtremeValueCopula().__class__.__name__ == 'NormalCopula':
    R = ot.CorrelationMatrix(2)
    R[1, 0] = 0.8
    copula = ot.NormalCopula(R)
elif ot.ExtremeValueCopula().__class__.__name__ == 'SklarCopula':
    student = ot.Student(3.0, [1.0] * 2, [3.0] * 2, ot.CorrelationMatrix(2))
    copula = ot.SklarCopula(student)
else:
    copula = ot.ExtremeValueCopula()
if copula.getDimension() == 1:
    copula = ot.ExtremeValueCopula(2)
copula.setDescription(['$u_1$', '$u_2$'])
pdf_graph = copula.drawPDF()
cdf_graph = copula.drawCDF()
fig = plt.figure(figsize=(10, 4))
pdf_axis = fig.add_subplot(121)
cdf_axis = fig.add_subplot(122)
View(pdf_graph,
     figure=fig,
     axes=[pdf_axis],
     add_legend=False,
     square_axes=True)
View(cdf_graph,
     figure=fig,
     axes=[cdf_axis],
     add_legend=False,
     square_axes=True)
title = str(copula)[:100].split('\n')[0]
fig.suptitle(title)
Example #3
# %%
showInference(bn, size="20")

# %%
showInference(bn, evs={"L": True}, size="20")

# %%
showInference(bn, evs={"L": False, "A": "0.2"}, size="20")

# %%
ie = gum.LazyPropagation(bn)
ie.addJointTarget(set(["T", "J"]))
ie.setEvidence({"L": True})
ie.makeInference()

# %%
distrib = otagrum.Utils.FromPotential(ie.jointPosterior({"T", "J"}))
View(distrib.drawPDF())

# %%
ie = gum.LazyPropagation(bn)
ie.addJointTarget(set(["T", "J"]))
ie.setEvidence({"L": False})
ie.makeInference()

# %%
distrib = otagrum.Utils.FromPotential(ie.jointPosterior({"T", "J"}))
View(distrib.drawPDF())
plt.show()
Example #4
x_plot = np.vstack(np.linspace(xmin, xmax, n_pts_plot))
fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(12, 6))

# On the left, the function
graph = ref_func.draw(xmin, xmax, n_pts_plot)
graph.setLegends(["Function"])
graphKriging = krigingMeta.draw(xmin, xmax, n_pts_plot)
graphKriging.setColors(["green"])
graphKriging.setLegends(["Kriging"])
graph.add(graphKriging)
cloud = ot.Cloud(x, y)
cloud.setColor("red")
cloud.setLegend("Data")
graph.add(cloud)
graph.setLegendPosition("topleft")
View(graph, axes=[ax1])

# On the right, the conditional kriging variance
graph = ot.Graph("", "x", "Conditional kriging variance", True, '')
# Sample for the data
sample = ot.Sample(n_pt, 2)
sample[:, 0] = x
cloud = ot.Cloud(sample)
cloud.setColor("red")
graph.add(cloud)
# Sample for the variance
sample = ot.Sample(n_pts_plot, 2)
sample[:, 0] = x_plot
variance = [[krigingResult.getConditionalCovariance(xx)[0, 0]]
            for xx in x_plot]
sample[:, 1] = variance
Example #5
    H = (Q / (Ks * B * alpha**0.5))**0.6
    Zc = H + Zv
    S = Zc - Zd
    return [S]


myFunction = ot.PythonFunction(4, 1, flooding)
myParam = ot.GumbelAB(1013.0, 558.0)
Q = ot.ParametrizedDistribution(myParam)
Q = ot.TruncatedDistribution(Q, 0.0, ot.SpecFunc.MaxScalar)
Ks = ot.Normal(30.0, 7.5)
Ks = ot.TruncatedDistribution(Ks, 0.0, ot.SpecFunc.MaxScalar)
Zv = ot.Uniform(49.0, 51.0)
Zm = ot.Uniform(54.0, 56.0)
inputX = ot.ComposedDistribution([Q, Ks, Zv, Zm])
inputX.setDescription(["Q", "Ks", "Zv", "Zm"])

size = 5000
computeSO = True
inputDesign = ot.SobolIndicesExperiment(inputX, size, computeSO).generate()
outputDesign = myFunction(inputDesign)
sensitivityAnalysis = ot.MartinezSensitivityAlgorithm(
    inputDesign, outputDesign, size)

graph = sensitivityAnalysis.draw()

fig = plt.figure(figsize=(8, 4))
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
View(graph, figure=fig, axes=[axis], add_legend=True)
Example #6
event.setName("Deviation > %g cm" % threshold)

# %%
# Parameterize and run the Monte Carlo algorithm:

ot.RandomGenerator.SetSeed(23091926)  #  set seed for reproducibility

experiment = ot.MonteCarloExperiment()
algo = ot.ProbabilitySimulationAlgorithm(event, experiment)
algo.setMaximumOuterSampling(200)
algo.setMaximumCoefficientOfVariation(0.2)
algo.run()

# %%
# Draw the distribution of the threshold exceedance probability:

from openturns.viewer import View
monte_carlo_result = algo.getResult()
probabilityDistribution = monte_carlo_result.getProbabilityDistribution()
graph = View(probabilityDistribution.drawPDF())

# %%
# Get the probability with which the beam deviation exceeds 30 cm:

probability = monte_carlo_result.getProbabilityEstimate()
print("Threshold excedance probability: {}".format(probability))

# %%
# Given the uncertainties on the applied load and the beam mechanical
# parameters, the beam deviation exceeds 30 cm with a probability of about 0.01.
# Is this probability low or not? It depends on your context 🙂
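
# %%
# A minimal sketch of how to assess the precision of this estimate, assuming the
# `monte_carlo_result` and `probability` objects defined above: the length of the
# 95% confidence interval is available from the simulation result.

length95 = monte_carlo_result.getConfidenceLength(0.95)
print("95%% confidence interval: [%.4f, %.4f]"
      % (probability - 0.5 * length95, probability + 0.5 * length95))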
Example #7
import openturns as ot
from openturns.viewer import View
import math as m

ot.RandomGenerator.SetSeed(0)

# 3-d test
R1 = ot.CovarianceMatrix(3)
R1[2, 1] = -0.25
R2 = ot.CovarianceMatrix(3)
R2[1, 0] = 0.5
R2[2, 1] = -0.3
R2[0, 0] = 1.3
print(R2)
dists = [ot.Normal([1.0, -2.0, 3.0], R1), ot.Normal([-1.0, 2.0, -2.0], R2)]
mixture = ot.Mixture(dists, [2.0 / 3.0, 1.0 / 3.0])

# 2-d test
dists = [
    ot.Normal([-1.0, 2.0], [1.0] * 2, ot.CorrelationMatrix(2)),
    ot.Normal([1.0, -2.0], [1.5] * 2, ot.CorrelationMatrix(2))
]
mixture = ot.Mixture(dists)

sample = mixture.getSample(100)
distribution = ot.KernelSmoothing().build(sample)
algo = ot.MinimumVolumeClassifier(distribution, 0.8)
graph = algo.drawContourAndSample([0.1, 0.5, 0.8], sample, [0, 1])

View(graph,
     contour_kw={'colors': ['black']},
     figure_kw={'figsize': (6.0, 6.0)})
Example #8
import openturns as ot
from openturns.viewer import View

dim = 1
f = ot.SymbolicFunction(['x'], ['x*sin(x)'])
uniform = ot.Uniform(0.0, 10.0)
distribution = ot.ComposedDistribution([uniform] * dim)
factoryCollection = [
    ot.OrthogonalUniVariateFunctionFamily(
        ot.OrthogonalUniVariatePolynomialFunctionFactory(
            ot.StandardDistributionPolynomialFactory(uniform)))
] * dim
functionFactory = ot.OrthogonalProductFunctionFactory(factoryCollection)
size = 10
sampleX = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]
sampleY = f(sampleX)
nk = [5] * dim
maxRank = 1
algo = ot.TensorApproximationAlgorithm(sampleX, sampleY, distribution,
                                       functionFactory, nk, maxRank)
algo.run()
result = algo.getResult()
metamodel = result.getMetaModel()

graph = f.draw(0.0, 10.0)
graph.add(metamodel.draw(0.0, 10.0))
graph.add(ot.Cloud(sampleX, sampleY))
graph.setColors(['blue', 'red', 'black'])
graph.setLegends(['model', 'meta model', 'sample'])
graph.setLegendPosition('topleft')
graph.setTitle('y(x)=x*sin(x)')
graph.setYTitle('y')
View(graph, figure_kwargs={'figsize': (8, 4)})
Example #9
# 5. Create the Monte-Carlo algorithm
sampleSize = 100
inputSample = inputRandomVector.getSample(sampleSize)
#print(inputSample)
outputSigma = f(inputSample)
#print(outputSigma)

# 7. Plot the histogram
from openturns import VisualTest
histoGraph = VisualTest.DrawHistogram(outputSigma / 1.e6, 20)
histoGraph.setTitle("Histogram of the stress")
histoGraph.setXTitle("Stress (MPa)")
histoGraph.setYTitle("Frequency")
#histoGraph.setBoundingBox([-1,7,0,0.60])
histoGraph.setLegends([""])
View(histoGraph).show()

# Generate observation noise
sigmaObservationNoiseSigma = 40.e6  # (Pa)
noiseSigma = ot.Normal(0.0, sigmaObservationNoiseSigma)
sampleNoiseH = noiseSigma.getSample(sampleSize)
observedSigma = outputSigma + sampleNoiseH

# Create and save sample
observedSample = ot.Sample(sampleSize, 2)
observedSample.setDescription(["Strain", "Stress"])
observedSample[:, 0] = inputSample[:, 0]
observedSample[:, 1] = observedSigma[:]

observedSample.exportToCSVFile("chaboche-observations.csv")
Example #10
        inputValues = X.getValues()
        f = ot.NumericalMathFunction(
            ot.PiecewiseLinearEvaluationImplementation(
                [x[0] for x in inputTG.getVertices()], inputValues))
        outputValues = ot.NumericalSample(0, 1)
        for t in self.outputGrid_.getVertices():
            kernel = ot.Normal(t[0], 0.05)

            def pdf(X):
                return [kernel.computePDF(X)]

            weight = ot.NumericalMathFunction(ot.PythonFunction(1, 1, pdf))
            outputValues.add(
                self.algo_.integrate(weight * f, kernel.getRange()))
        return ot.Field(self.outputGrid_, outputValues)


N = 5
X = ot.TemporalNormalProcess(ot.GeneralizedExponential([0.1], 1.0),
                             ot.RegularGrid(-5.0, 0.1, 101))
f = ot.DynamicalFunction(GaussianConvolution())
Y = ot.CompositeProcess(f, X)
x_graph = X.getSample(N).drawMarginal(0)
y_graph = Y.getSample(N).drawMarginal(0)
fig = plt.figure(figsize=(10, 4))
plt.suptitle("Composite process")
x_axis = fig.add_subplot(121)
y_axis = fig.add_subplot(122)
View(x_graph, figure=fig, axes=[x_axis], add_legend=False)
View(y_graph, figure=fig, axes=[y_axis], add_legend=False)
Example #11
tmin = 1790.0  # Minimum date
tmax = 2001.0  # Maximum date
gridsize = 100  # Number of time steps
modeleName = "Logistic model"  # Model name
parameterIndexName = "Time (years)"  # Name of the indexing parameter
fieldName = "Population (millions)"  # Field name
alphaInf = 0.05  # Alpha level for the lower quantile
# Define the time interval for the simulation
monhorizon = ot.Interval(tmin, tmax)
# Define the regular time grid
mesh = ot.IntervalMesher([gridsize - 1]).build(monhorizon)
graph1 = mesh.draw()
graph1.setTitle(modeleName)
graph1.setXTitle(parameterIndexName)
View(graph1)

from numpy import exp
from numpy import array


def logisticSolution(X):
    # Get the mesh vertices
    v = mesh.getVertices()
    # Convert to an array
    t = array(v)
    # Flatten the 2D array into a 1D array
    t = t.flatten()
    # Get the initial date
    t0 = t[0]
    # Compute the trajectory
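    # A minimal sketch of the remaining steps, assuming the input point X holds
    # [y0, a, b] (initial population, growth rate and interaction parameter;
    # this layout is an assumption):
    y0, a, b = X[0], X[1], X[2]
    # Closed-form solution of the logistic equation y' = a*y - b*y^2
    y = a * y0 / (b * y0 + (a - b * y0) * exp(-a * (t - t0)))
    # Return one value per mesh vertex
    return [[yi] for yi in y]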
Example #12
    pl.plot([i, i], [to_ci_a, to_ci_b], "b-")
pl.legend()

for i in range(input_dimension):
    dist_fo_i = dist_fo.getMarginal(i)
    dist_to_i = dist_to.getMarginal(i)
    print("X%d, S=%s, ST=%s" % (i, str(dist_fo_i), str(dist_to_i)))
# Number of evaluations: (input_dimension + 2) model evaluations per iteration
nbiter = outerSampling * blockSize
print("Nb iterations = %d" % (nbiter))
nbfunceval = nbiter * (input_dimension + 2)
print("Nb function evaluations = %d" % (nbfunceval))
# Convergence graphs and marginal distributions of the indices
View(algo.drawFirstOrderIndexConvergence())
View(algo.drawTotalOrderIndexConvergence())

for i in range(input_dimension):
    dist_fo_i = dist_fo.getMarginal(i)
    graph = dist_fo_i.drawPDF()
    graph.setTitle("S%d" % (i))
    graph.setXTitle("S%d" % (i))
    graph.setLegends([""])
    View(graph)
    dist_to_i = dist_to.getMarginal(i)
    graph = dist_to_i.drawPDF()
    graph.setTitle("ST%d" % (i))
    graph.setXTitle("ST%d" % (i))
    graph.setLegends([""])
Example #13
# %%
# It seems that the linearity hypothesis is accurate.

# %%
# We complete this analysis using some useful graphs:

# %%
fig = plt.figure(figsize=(12, 10))
for k, plot in enumerate([
        "drawResidualsVsFitted", "drawScaleLocation", "drawQQplot",
        "drawCookDistance", "drawResidualsVsLeverages", "drawCookVsLeverages"
]):
    graph = getattr(analysis, plot)()
    ax = fig.add_subplot(3, 2, k + 1)
    v = View(graph, figure=fig, axes=[ax])
_ = v.getFigure().suptitle("Diagnostic graphs", fontsize=18)

# %%
# These graphics help in assessing the linear model hypothesis. Indeed:
#
#  - the quantile-quantile plot seems accurate
#
#  - we notice some heteroscedasticity in the noise
#
#  - it seems that there is no outlier

# %%
# Finally we give the intervals for each estimated coefficient (95% confidence interval):

# %%
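# A minimal sketch, assuming `analysis` is the LinearModelAnalysis object used
# for the diagnostic plots above:
level = 0.95
interval = analysis.getCoefficientsConfidenceInterval(level)
print("Coefficient confidence intervals at level %.2f:" % level)
print(interval)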
Example #14
# Graph section
# We build 2 curves,
# each one a function of the frequency values
ind = ot.Indices(2)
ind.fill()

# Some cosmetics: labels, legend position, ...
graph = ot.Graph("Estimated spectral function - Validation", "Frequency",
                 "Spectral density function", True, "topright", 1.0, ot.GraphImplementation.LOGY)

# The first curve is the estimated density as a function of the frequency
curve1 = ot.Curve(plotSample.getMarginal(ind))
curve1.setColor('blue')
curve1.setLegend('estimate model')

# The second curve is the theoretical density as a function of the frequency
ind[1] = 2
curve2 = ot.Curve(plotSample.getMarginal(ind))
curve2.setColor('red')
curve2.setLegend('Cauchy model')

graph.add(curve1)
graph.add(curve2)

fig = plt.figure(figsize=(10, 4))
plt.suptitle('Spectral model estimation')
graph_axis = fig.add_subplot(111)
view = View(graph, figure=fig, axes=[graph_axis], add_legend=False)
view.show()
Example #15
             for i in range(len(data))]
bounds = ot.PolygonArray([
    ot.Polygon(
        [dataLower[i], dataLower[i + 1], dataUpper[i + 1], dataUpper[i]],
        "green", "green") for i in range(len(dataLower) - 1)
])
graph = ot.Graph()
graph.setLegendPosition("bottomright")
graph.setAxes(True)
graph.setGrid(True)
graph.add(c)
graph.add(bounds)

d = f.draw(xMin, xMax).getDrawable(0)
d.setLineStyle("dashed")
d.setColor("magenta")
d.setLineWidth(2)
graph.add(d)
graph.add(graph_meta)
cloud = ot.Cloud(X, Y)
cloud.setPointStyle("circle")
cloud.setColor("red")
graph.add(cloud)
graph.setTitle("Kriging meta-modeling")
graph.setXTitle(r"$x$")
graph.setYTitle(r"$f$")
graph.setLegends(["95% conf. bounds", "true function", "meta-model", "data"])
view = View(graph, (800, 600))
view.save("../plot_kriging.png")
view.close()
Example #16
tic = time.time()
design = sa.generate()
result = sa.getResult()
toc = time.time()
dt1 = toc - tic
print("time=%f" % dt1)
print("dimension=%d, size=%d, sa=%s" % (dimension, size, sa))
print(str(result.getOptimalValue()) + " c2=" + str(result.getC2()) +
      " phiP=" + str(result.getPhiP()) + " minDist=" + str(result.getMinDist()))
crit = result.drawHistoryCriterion()
proba = result.drawHistoryProbability()
temp = result.drawHistoryTemperature()

pp = PdfPages('large_OTLHS.pdf')

# Criterion
fig = View(crit, plot_kwargs={'color':'blue'}).getFigure()
fig.savefig("otlhs_c2_crit_big.png")
pp.savefig(fig)
plt.close(fig)
# Proba
fig = View(proba, plot_kwargs={'marker': 'o', 'ms': 0.6}, axes_kwargs={'ylim': [-0.05, 1.05]}).getFigure()
fig.savefig("lhs_c2_proba_big.png")
pp.savefig(fig)
plt.close(fig)
# Temperature
fig = View(temp).getFigure()
pp.savefig(fig)
plt.close(fig)

minDist = ot.SpaceFillingMinDist()
sa = ot.SimulatedAnnealingLHS(lhsDesign, geomProfile, minDist)
Example #17
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View

myCop1 = ot.GumbelCopula(2)
myCop2 = ot.NormalCopula(2)
alpha = 0.3
myOrdSumCop = ot.OrdinalSumCopula([myCop1, myCop2], [alpha])
myOrdSumCop.setDescription(['$u_1$', '$u_2$'])
graphPDF = myOrdSumCop.drawPDF()
graphCDF = myOrdSumCop.drawCDF()

fig = plt.figure(figsize=(8, 4))
pdf_axis = fig.add_subplot(121)
cdf_axis = fig.add_subplot(122)
pdf_axis.set_xlim(auto=True)
cdf_axis.set_xlim(auto=True)

View(graphPDF, figure=fig, axes=[pdf_axis], add_legend=True)
View(graphCDF, figure=fig, axes=[cdf_axis], add_legend=True)
fig.suptitle("Ordinal Sum of Copulas: Gumbel(2) and Normal(2): pdf and cdf")
Example #18
import openturns as ot
from openturns.viewer import View

ot.RandomGenerator.SetSeed(0)

size = 100
inputDimension = 6
inputSample = ot.Normal(inputDimension).getSample(size)
inputVar = ['X' + str(i) for i in range(inputDimension)]
inputSample.setDescription(inputVar)
expression = ''
for i in range(inputDimension):
    if i > 0:
        expression += '+'
    expression += 'cos(' + str(i + 1) + '*' + inputVar[i] + ')'
model = ot.SymbolicFunction(inputVar, [expression])
outputSample = model(inputSample)

cobweb = ot.VisualTest.DrawParallelCoordinates(
    inputSample, outputSample, 2.5, 3.0, 'red', False)

View(cobweb, figure_kw={'figsize': (10, 6)},
     legend_kw={'loc': 'lower right'})
Example #19
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
ot.RandomGenerator.SetSeed(0)
factory = ot.GumbelFactory()
ref = factory.build()
dimension = ref.getDimension()
if dimension <= 2:
    sample = ref.getSample(50)
    distribution = factory.build(sample)
    if dimension == 1:
        distribution.setDescription(['$t$'])
        pdf_graph = distribution.drawPDF(256)
        cloud = ot.Cloud(sample, ot.NumericalSample(sample.getSize(), 1))
        cloud.setColor('blue')
        cloud.setPointStyle('fcircle')
        pdf_graph.add(cloud)
        fig = plt.figure(figsize=(10, 4))
        plt.suptitle(str(distribution))
        pdf_axis = fig.add_subplot(111)
        View(pdf_graph, figure=fig, axes=[pdf_axis], add_legend=False)
    else:
        sample = ref.getSample(500)
        distribution.setDescription(['$t_0$', '$t_1$'])
        pdf_graph = distribution.drawPDF([256]*2)
        cloud = ot.Cloud(sample)
        cloud.setColor('red')
        cloud.setPointStyle('fcircle')
        pdf_graph.add(cloud)
        fig = plt.figure(figsize=(10, 4))
        plt.suptitle(str(distribution))
        pdf_axis = fig.add_subplot(111)
        View(pdf_graph, figure=fig, axes=[pdf_axis], add_legend=False)
Example #20
x_min = -2.0
x_max = 2.0
n_points = 128

parametric_graph = f.draw(x_min, x_max, n_points)
continuous_graph = continuous_measure.draw(x_min, x_max, n_points)
discretized_graph = discretized_measure.draw(x_min, x_max, n_points)

parametric_curve = parametric_graph.getDrawable(0)
discretized_curve = discretized_graph.getDrawable(0)

left_graph = ot.Graph(continuous_graph)
left_graph.add(parametric_curve)
#left_graph.setLegends(['measure', 'parametric function'])
#left_graph.setLegendPosition('topright')
left_graph.setColors(['blue', 'red'])

right_graph = ot.Graph(continuous_graph)
right_graph.add(discretized_curve)
#right_graph.setLegends(['measure', 'discretized measure'])
#right_graph.setLegendPosition('topright')
right_graph.setColors(['blue', 'red'])

fig = plt.figure(figsize=(10, 4))
plt.suptitle(str(measure))
left_axis = fig.add_subplot(121)
right_axis = fig.add_subplot(122)

View(left_graph, figure=fig, axes=[left_axis], add_legend=False)
View(right_graph, figure=fig, axes=[right_axis], add_legend=False)
Example #21
#!/usr/bin/env python

from __future__ import print_function
import openturns as ot
import sys

ot.TESTPREAMBLE()

grid = ot.GridLayout(2, 3)
palette = ot.Drawable.BuildDefaultPalette(10)
for j in range(grid.getNbColumns()):
    alpha = 1.0 + j
    pdf_curve = ot.WeibullMin(1.0, alpha, 0.0).drawPDF()
    cdf_curve = ot.WeibullMin(1.0, alpha, 0.0).drawCDF()
    pdf_curve.setColors([palette[j]])
    cdf_curve.setColors([palette[j]])
    pdf_curve.setLegends(['alpha={}'.format(alpha)])
    cdf_curve.setLegends(['alpha={}'.format(alpha)])
    grid.setGraph(0, j, pdf_curve)
    grid.setGraph(1, j, cdf_curve)

assert (grid.getNbColumns() == 3)
assert (grid.getNbRows() == 2)
assert (grid.getGraph(
    0, 0).getDrawable(0).getImplementation().getClassName() == "Curve")

if len(sys.argv) > 1:
    from openturns.viewer import View
    View(grid).save('grid.png')
Example #22
import openturns as ot
from openturns.viewer import View

ot.RandomGenerator.SetSeed(0)

dimension = 2
R = ot.CorrelationMatrix(dimension)
R[0, 1] = 0.8
distribution = ot.Normal([3.] * dimension, [2.] * dimension, R)
size = 100
sample = distribution.getSample(size)
firstSample = ot.Sample(size, 1)
secondSample = ot.Sample(size, 1)
for i in range(size):
    firstSample[i] = ot.Point(1, sample[i, 0])
    secondSample[i] = ot.Point(1, sample[i, 1])

lmtest = ot.LinearModelFactory().build(firstSample, secondSample)

drawLinearModel = ot.VisualTest.DrawLinearModel(firstSample, secondSample,
                                                lmtest)

View(drawLinearModel, figure_kwargs={'figsize': (5, 5)})
Example #23
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
from math import sqrt

domain = ot.Interval(-1.0, 1.0)
basis = ot.OrthogonalProductFunctionFactory([ot.FourierSeriesFactory()])
coll = [basis.build(i) for i in range(10)]
experiment = ot.GaussProductExperiment(basis.getMeasure(), [20])
mustScale = False
threshold = 0.001
model = ot.AbsoluteExponential([1.0])
algo = ot.KarhunenLoeveQuadratureAlgorithm(
    domain, domain, model, experiment, coll, mustScale, threshold)
algo.run()
ev = algo.getResult().getEigenvalues()
modes = algo.getResult().getScaledModes()
g = ot.Graph("Quadrature approx. of KL expansion for $C(s,t)=e^{-|s-t|}$")
g.setAxes(True)
g.setGrid(True)
g.setXTitle("$t$")
g.setYTitle(r"$\sqrt{\lambda_n}\phi_n$")
for mode in modes:
    g.add(mode.draw(-1.0, 1.0, 256))
g.setColors(ot.Drawable.BuildDefaultPalette(len(modes)))

fig = plt.figure(figsize=(6, 4))
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
View(g, figure=fig, axes=[axis], add_legend=False)
Example #24
import openturns as ot
from math import exp
from matplotlib import pyplot as plt
from openturns.viewer import View

mesh = ot.RegularGrid(0.0, 1.0, 4)
values = [[0.5], [1.5], [1.0], [-0.5]]
field = ot.Field(mesh, values)
func = ot.P1LagrangeEvaluationImplementation(field)
func.setDescription(['$x$', '$y$'])

graph = func.draw(-1.0, 4.0, 1024)
cloud = ot.Cloud(mesh.getVertices(), values)
cloud.setPointStyle("square")
graph.add(cloud)
graph.setColors(["blue", "red"])
fig = plt.figure(figsize=(10, 4))
plt.suptitle('P1 Lagrange interpolation')
func_axis = fig.add_subplot(111)
view = View(graph, figure=fig, axes=[func_axis], add_legend=False)
view.show()
Example #25
import openturns as ot
from openturns.viewer import View

X = ot.RandomVector(ot.Normal())
f = ot.SymbolicFunction(['x'], ['x^2*sin(x)'])
Y = ot.CompositeRandomVector(f, X)
sample = Y.getSample(200)
histogram = ot.VisualTest.DrawHistogram(sample)
histogram.setTitle('Y=x^2*sin(x)')
View(histogram, figure_kwargs={'figsize': (6, 4)}, add_legend=False)
Example #26
import openturns as ot
from openturns.viewer import View

center = [0.0]
constant = [3.0]
linear = ot.Matrix([[2.0]])
quadratic = ot.SymmetricTensor([[[5.0]]])
f = ot.QuadraticFunction(center, constant, linear, quadratic)

graph = f.draw(0.0, 10.0)
graph.setTitle('$y=5x^2+2x+3$')
View(graph, figure_kwargs={'figsize': (8, 4)}, add_legend=True)
Example #27
                                       delta_t, 2**12, 2**3, 1e-2))
    values_FORM.append(
        computeCrossingProbability_FORM(b, tick[0], mu_S, covariance, R,
                                        delta_t))

# %%
print('Values MC = ', values_MC)
print('Values QMC = ', values_QMC)
print('Values FORM = ', values_FORM)

# %%
# Draw the graphs!

# %%
g = ot.Graph()
g.setAxes(True)
g.setGrid(True)
c = ot.Curve(times, [[p] for p in values_MC])
g.add(c)
c = ot.Curve(times, [[p] for p in values_QMC])
g.add(c)
c = ot.Curve(times, [[p] for p in values_FORM])
g.add(c)
g.setLegends(["MC", "QMC", "FORM"])
g.setColors(["red", "blue", 'black'])
g.setLegendPosition("topleft")
g.setXTitle("t")
g.setYTitle("Outcrossing rate")
view = View(g)
view.ShowAll()
Example #28
import openturns as ot
from openturns.viewer import View

d = ot.Axial([1.5, 2.5, 3.5], [1, 2, 3])
s = d.generate()
s.setDescription(["X1", "X2", "X3"])
g = ot.Graph()
g.setTitle("Axial experiment")
g.setGridColor("black")
p = ot.Pairs(s)
g.add(p)
View(g)
Example #29
import openturns as ot
from openturns.viewer import View

ot.RandomGenerator.SetSeed(0)

size = 100
normal = ot.Normal(1)
sample = normal.getSample(size)

henryPlot = ot.VisualTest.DrawHenryLine(sample)

View(henryPlot, figure_kwargs={'figsize': (4.5, 4.5)})
Example #30
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
covarianceModel = ot.SquaredExponential()
if covarianceModel.getSpatialDimension() == 1:
    scale = covarianceModel.getScale()[0]
    if covarianceModel.isStationary():
        def f(x):
            return [covarianceModel(x)[0, 0]]
        func = ot.PythonFunction(1,1,f)
        func.setDescription(['$tau$', '$cov$'])
        cov_graph = func.draw(-3.0 * scale, 3.0 * scale, 129)
        fig = plt.figure(figsize=(10, 4))
        plt.suptitle(str(covarianceModel))
        cov_axis = fig.add_subplot(111)
        View(cov_graph, figure=fig, axes=[cov_axis], add_legend=False)
    else:
        def f(x):
            return [covarianceModel([x[0]], [x[1]])[0, 0]]
        func = ot.PythonFunction(2,1,f)
        func.setDescription(['$s$', '$t$', '$cov$'])
        cov_graph = func.draw([-3.0 * scale]*2, [3.0 * scale]*2, [129]*2)
        fig = plt.figure(figsize=(10, 4))
        plt.suptitle(str(covarianceModel))
        cov_axis = fig.add_subplot(111)
        View(cov_graph, figure=fig, axes=[cov_axis], add_legend=False)
Example #31
distribution = ot.KernelSmoothing().build(sample)
algo = ot.MinimumVolumeClassifier(distribution, 0.8)
threshold = algo.getThreshold()
print("threshold=", threshold)
assert m.fabs(threshold - 0.0012555) < 1e-3, "wrong threshold"
cls_ref = [
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 0, 1, 1, 0, 1, 1, 1, 1, 1
]
for i in range(35):
    x = sample[i]
    cls = algo.classify(x)
    pdf = mixture.computePDF(x)
    print(i, x, cls, pdf - threshold)
    assert cls == cls_ref[i], "wrong class"

graph1 = algo.drawSample(sample, [0])
graph2 = algo.drawSample(sample, [1])
graph3 = algo.drawSample(sample, [0, 1])
contour_alpha = [0.1, 0.5, 0.8]
graph4 = algo.drawContour(contour_alpha)
graph5 = algo.drawContourAndSample(contour_alpha, sample, [0, 1])

if len(sys.argv) > 1:
    from openturns.viewer import View
    #View(graph3).show()
    #View(graph4).show()
    view = View(graph5)
    view.save('mvc.png')
    view.ShowAll()
Example #32
print("dimension=%d, size=%d" % (dimension, size))
for nSimu in [100, 200, 400, 800, 1600, 3200, 6400, 12800, 25600, 51200, 102400, 204800, 409600]:
    ot.RandomGenerator.SetSeed(0)
    # Factory: lhs generates
    lhsDesign = otlhs.LHSDesign(bounds, size)
    mc = otlhs.MonteCarloLHS(lhsDesign, nSimu, c2)
    tic = time.time()
    result = mc.generate()
    toc = time.time()
    print("%d %f %f" % (nSimu, result.getOptimalValue(), toc - tic))

pp = PdfPages("small_mc_OTLHS.pdf")
# plot criterion & save it
crit = result.drawHistoryCriterion()
fig = View(crit, plot_kwargs={"color": "blue"}).getFigure()
pp.savefig(fig)
plt.close(fig)
# plot design
fig = PyPlotDesign(result.getOptimalDesign(), bounds, size, size, plot_kwargs={"color": "blue", "marker": "o", "ms": 6})
plt.suptitle("LHS design of size=%d - Optimization of %s criterion using %d MC sample" % (size, c2.getName(), nSimu))
fig.savefig("lhs_mc_c2_%d.png" % size)
plt.close(fig)

minDist = otlhs.SpaceFillingMinDist()

# Factory: lhs generates
lhsDesign = otlhs.LHSDesign(bounds, size)
mc = otlhs.MonteCarloLHS(lhsDesign, nSimu, minDist)
tic = time.time()
result = mc.generate()
Example #33
import openturns as ot
from openturns.viewer import View

ot.RandomGenerator.SetSeed(0)

dimension = 2
R = ot.CorrelationMatrix(dimension)
R[0, 1] = 0.8
distribution = ot.Normal([3.] * dimension, [2.] * dimension, R)
size = 100
sample = distribution.getSample(size)
firstSample = ot.Sample(size, 1)
secondSample = ot.Sample(size, 1)
for i in range(size):
    firstSample[i] = ot.Point(1, sample[i, 0])
    secondSample[i] = ot.Point(1, sample[i, 1])

lmtest = ot.LinearModelAlgorithm(firstSample, secondSample).getResult()

drawLinearModelResidual = ot.VisualTest.DrawLinearModelResidual(lmtest)

View(drawLinearModelResidual, figure_kw={'figsize': (5, 5)})
Example #34
#! /usr/bin/env python

# use non-interactive backend
import matplotlib

matplotlib.use("Agg")

from openturns.viewer import View
import openturns as ot

# Curve
graph = ot.Normal().drawCDF()
# graph.draw('curve1.png')
view = View(graph, plot_kwargs={"color": "blue"})
# view.save('curve1.png')
view.show(block=False)

# Contour
graph = ot.Normal([1, 2], [3, 5], ot.CorrelationMatrix(2)).drawPDF()
# graph.draw('curve2.png')
view = View(graph)
# view.save('curve2.png')
view.show(block=False)

# Histogram tests
normal = ot.Normal(1)
size = 100
sample = normal.getSample(size)
graph = ot.VisualTest.DrawHistogram(sample, 10)
# graph.draw('curve3.png')
view = View(graph)
Example #35
sa = ot.SimulatedAnnealingLHS(lhsDesign, geomProfile, c2)
tic = time.time()
result = sa.generate()
toc = time.time()
dt1 = toc - tic
print("time=%f" % dt1)
print("dimension=%d, size=%d, sa=%s" % (dimension, size, sa))
print(str(result.getOptimalValue()) + " c2=" + str(result.getC2()) +
      " phiP=" + str(result.getPhiP()) + " minDist=" + str(result.getMinDist()))

crit = result.drawHistoryCriterion()
proba = result.drawHistoryProbability()
temp = result.drawHistoryTemperature()

pp = PdfPages('small_OTLHS.pdf')
# Criterion
fig = View(crit, plot_kwargs={'color':'blue'}).getFigure()
fig.savefig("crit_sa_geom.png")
pp.savefig(fig)
plt.close(fig)
# Proba
fig = View(proba, plot_kwargs={'marker': 'o', 'ms': 0.6}, axes_kwargs={'ylim': [-0.05, 1.05]}).getFigure()
fig.savefig("lhs_c2_proba.png")
pp.savefig(fig)
plt.close(fig)
# Temperature
fig = View(temp).getFigure()
pp.savefig(fig)
plt.close(fig)

linearProfile = ot.LinearProfile(10.0, 50000)
minDist = ot.SpaceFillingMinDist()