Example #1
import openturns as ot
from sklearn import datasets


def data():
    """Load the diabetes dataset and build a histogram-based input distribution."""
    dataset = datasets.load_diabetes()
    X = dataset.data
    y = dataset.target.reshape(-1,1)
    dim = X.shape[1]
    distribution = ot.ComposedDistribution([ot.HistogramFactory().build(ot.Sample(X).getMarginal(i))
                                            for i in range(dim)])

    return X, y, dim, distribution
    def drawResidualsDistribution(self, model="uncensored", name=None):
        """
        Draw the residuals histogram with the fitted distribution.

        Parameters
        ----------
        model : string
            The residuals to be used, either *uncensored* or
            *censored* if a censored threshold was given. Default is *uncensored*.
        name : string
            Name of the figure to be saved, with the *transparent* option set to
            True and *bbox_inches='tight'*. It can be either the file name or the
            full path name. Default is None.

        Returns
        -------
        fig : `matplotlib.figure <http://matplotlib.org/api/figure_api.html>`_
            Matplotlib figure object.
        ax : `matplotlib.axes <http://matplotlib.org/api/axes_api.html>`_
            Matplotlib axes object.
        """

        # Check that the censored model exists when asking for it
        if model == "censored" and not self._censored:
            raise NameError('Residuals for censored data are not available.')

        if model == "uncensored":
            residuals = self._resultsUnc.residuals
            distribution = self._resultsUnc.resDist
        elif model == "censored":
            residuals = self._resultsCens.residuals
            distribution = self._resultsCens.resDist
        else:
            raise NameError("model can be 'uncensored' or 'censored'.")

        fig, ax = plt.subplots(figsize=(8, 6))
        graphHist = ot.HistogramFactory().build(residuals).drawPDF()
        graphPDF = distribution.drawPDF()
        graphHist.setGrid(True)
        View(graphHist, axes=[ax], bar_kw={'color':'blue','alpha': 0.5, 'label':'Residuals histogram'})
        View(graphPDF, axes=[ax], plot_kw={'label': str(distribution)})
        ax.set_xlabel('Defect realizations')
        if model == "uncensored":
            ax.set_title('Residuals distribution')
        elif model == "censored":
            ax.set_title('Residuals distribution for censored data')

        if name is not None:
            fig.savefig(name, bbox_inches='tight', transparent=True)

        return fig, ax
def KSG_learning(data):
    print("Build KSG coefficients distribution")
    size = data.getSize()
    dimension = data.getDimension()
    t0 = time()
    marginals = [
        ot.HistogramFactory().build(data.getMarginal(i))
        for i in range(dimension)
    ]
    plot_marginals("KSG_marginals", marginals)
    copula = ot.NormalCopulaFactory().build(data)
    distribution = ot.ComposedDistribution(marginals, copula)
    print("t=", time() - t0, "s")
    return distribution
def KSB_learning(data):
    # Less naive estimation of the coefficients distribution using
    # univariate kernel smoothing for the marginals and a Bernstein copula
    print("Build KSB coefficients distribution")
    size = data.getSize()
    dimension = data.getDimension()
    t0 = time()
    marginals = [
        ot.HistogramFactory().build(data.getMarginal(i))
        for i in range(dimension)
    ]
    # marginals = [ot.KernelSmoothing().build(data.getMarginal(i)) for i in range(dimension)]
    plot_marginals("KSB_marginals", marginals)
    copula = ot.EmpiricalBernsteinCopula(data, size)
    #copula = ot.BernsteinCopulaFactory().build(data)
    distribution = ot.ComposedDistribution(marginals, copula)
    print("t=", time() - t0, "s")
    return distribution
Example #5
def BuildDistribution(X):
    #return ot.FunctionalChaosAlgorithm.BuildDistribution(X)
    input_dimension = X.shape[1]
    marginals = []
    for j in range(input_dimension):
        marginals.append(ot.HistogramFactory().build(X[:,j].reshape(-1, 1)))
    isIndependent = True
    for j in range(input_dimension):
        marginalJ = X[:,j].reshape(-1, 1)
        for i in range(j + 1, input_dimension):
            marginalI = X[:,i].reshape(-1, 1)
            testResult = ot.HypothesisTest.Spearman(marginalI, marginalJ)
            isIndependent = isIndependent and testResult.getBinaryQualityMeasure()
    copula = ot.IndependentCopula(input_dimension)
    if not isIndependent:
        copula = ot.NormalCopulaFactory().build(X)
    distribution = ot.ComposedDistribution(marginals, copula)
    return distribution
Example #6
    def fit_to_histogram_distribution(self, parameter, showQQ=False):
        """Generate histogram from results.

        Parameters
        ----------
        parameter: string
            Parameter whose distribution is to be found.
        showQQ: bool, optional
            If True, draw a QQ plot to check the fitted distribution.

        Returns
        -------
        :class:`openturns.Distribution`
        """
        sample = self.sampledict[parameter]
        distribution = ot.HistogramFactory().build(sample)
        logger.debug(distribution)
        if showQQ:
            # Draw QQ plot to check fitted distribution
            QQ_plot = ot.VisualTest.DrawQQplot(sample, distribution)
            View(QQ_plot).show()
        return distribution
def CBN_parameter_learning(data, dag):
    size = data.getSize()
    dimension = data.getDimension()
    print("    Learning parameters")
    t1 = time()
    print("        Learning the CBN parameters")
    t2 = time()
    ot.ResourceMap.SetAsUnsignedInteger("BernsteinCopulaFactory-kFraction", 2)
    ot.ResourceMap.SetAsUnsignedInteger("BernsteinCopulaFactory-MinM",
                                        size // 2 - 2)
    ot.ResourceMap.SetAsUnsignedInteger("BernsteinCopulaFactory-MaxM",
                                        size // 2 - 1)
    cbn = otagrum.ContinuousBayesianNetworkFactory(ot.HistogramFactory(),
                                                   ot.BernsteinCopulaFactory(),
                                                   dag, 0, 0,
                                                   False).build(data)
    print("        t=", time() - t2, "s")
    print("        Learning the marginal parameters")
    t2 = time()
    # marginals = [ot.KernelSmoothing().build(data.getMarginal(i)) for i in range(dimension)]
    # marginals = [ot.HistogramFactory().build(data.getMarginal(i)) for i in range(dimension)]
    print("        t=", time() - t2, "s")
    print("    t=", time() - t1, "s")
    return cbn
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
ot.RandomGenerator.SetSeed(0)
factory = ot.HistogramFactory()
ref = factory.build()
dimension = ref.getDimension()
if dimension <= 2:
    sample = ref.getSample(50)
    distribution = factory.build(sample)
    if dimension == 1:
        distribution.setDescription(['$t$'])
        pdf_graph = distribution.drawPDF(256)
        cloud = ot.Cloud(sample, ot.Sample(sample.getSize(), 1))
        cloud.setColor('blue')
        cloud.setPointStyle('fcircle')
        pdf_graph.add(cloud)
        fig = plt.figure(figsize=(10, 4))
        plt.suptitle(str(distribution))
        pdf_axis = fig.add_subplot(111)
        View(pdf_graph, figure=fig, axes=[pdf_axis], add_legend=False)
    else:
        sample = ref.getSample(500)
        distribution.setDescription(['$t_0$', '$t_1$'])
        pdf_graph = distribution.drawPDF([256]*2)
        cloud = ot.Cloud(sample)
        cloud.setColor('red')
        cloud.setPointStyle('fcircle')
        pdf_graph.add(cloud)
        fig = plt.figure(figsize=(10, 4))
        plt.suptitle(str(distribution))
uniformSample = U.getSample(n)

# %%
# To generate the numbers, we evaluate the quantile function on the uniform numbers.

# %%
weibullSample = quantile(uniformSample)

# %%
# In order to compare the results, we use the `WeibullMin` class (using the default value of the location parameter :math:`\gamma=0`).

# %%
W = ot.WeibullMin(beta,alpha)

# %%
histo = ot.HistogramFactory().build(weibullSample).drawPDF()
histo.setTitle("Weibull alpha=%s, beta=%s, n=%d" % (alpha,beta,n))
histo.setLegends(["Sample"])
wpdf = W.drawPDF()
wpdf.setColors(["blue"])
wpdf.setLegends(["Weibull"])
histo.add(wpdf)
view = viewer.View(histo)

# %%
# We see that the empirical histogram of the generated outcomes is close to the exact density of the Weibull distribution.

# %%
# Visualization of the quantiles
# ------------------------------
Example #10
import openturns as ot
from openturns import viewer
from matplotlib import pyplot as plt
ot.Log.Show(ot.Log.NONE)

# %%
# Create processes to aggregate
myMesher = ot.IntervalMesher([100, 10])
lowerBound = [0.0, 0.0]
upperBound = [2.0, 4.0]
myInterval = ot.Interval(lowerBound, upperBound)
myMesh = myMesher.build(myInterval)
myProcess1 = ot.WhiteNoise(ot.Normal(), myMesh)
myProcess2 = ot.WhiteNoise(ot.Triangular(), myMesh)

# %%
# Draw the values of a realization of the first process
marginal = ot.HistogramFactory().build(myProcess1.getRealization().getValues())
graph = marginal.drawPDF()
view = viewer.View(graph)

# %%
# Create an aggregated process
myAggregatedProcess = ot.AggregatedProcess([myProcess1, myProcess2])

# %%
# Draw the values of the realization of the first marginal
marginal = ot.HistogramFactory().build(
    myAggregatedProcess.getRealization().getValues().getMarginal(0))
graph = marginal.drawPDF()
viewer.View(graph)
plt.show()
Example #11
# %%
# We can display the parameters of the fitted distribution `myDistribution`:
print(myDistribution)

# %%
# We can also get the actual distribution (Weibull, Frechet or Gumbel) with the `getActualDistribution` method:
print(myDistribution.getActualDistribution())

# %%
# The given sample is then best described by a Frechet distribution.

# %%
# We draw the fitted distribution and a histogram of the data.
graph = myDistribution.drawPDF()
graph.add(ot.HistogramFactory().build(sample).drawPDF())
graph.setColors(["black", "red"])
graph.setLegends(["GEV fitting", "histogram"])
graph.setLegendPosition("topright")

view = viewer.View(graph)
axes = view.getAxes()
_ = axes[0].set_xlim(-20.0, 20.0)

# %%
# We compare different fitting strategies for this sample (a sketch follows the list):
#
# - use the histogram from the data (red)
# - the GEV fitted distribution (black)
# - the pure Frechet fitted distribution (green)
# - the pure Gumbel fitted distribution (blue)
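# %%
# The comparison code itself is not shown above; the following is a minimal sketch of
# how it could look, assuming `sample`, `myDistribution`, `ot` and `viewer` from the
# previous cells. The pure Frechet and Gumbel fits are obtained here with
# `FrechetFactory` and `GumbelFactory`.

# %%
frechet = ot.FrechetFactory().build(sample)
gumbel = ot.GumbelFactory().build(sample)
graph = ot.HistogramFactory().build(sample).drawPDF()
graph.add(myDistribution.drawPDF())
graph.add(frechet.drawPDF())
graph.add(gumbel.drawPDF())
graph.setColors(["red", "black", "green", "blue"])
graph.setLegends(["histogram", "GEV fitting", "Frechet", "Gumbel"])
graph.setLegendPosition("topright")
view = viewer.View(graph)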
Example #12
import openturns as ot
from openturns.viewer import View

# 1. Define the random variables
myParam = ot.GumbelAB(1013., 558.)
Q = ot.ParametrizedDistribution(myParam)
otLOW = ot.TruncatedDistribution.LOWER
Q = ot.TruncatedDistribution(Q, 0, otLOW)
Ks = ot.Normal(30.0, 7.5)
Ks = ot.TruncatedDistribution(Ks, 0, otLOW)

# 2. Define the function
formulas = ['(Q/(Ks*300.*sqrt(0.001)))^(3./5.)']
g = ot.SymbolicFunction(['Q', 'Ks'], formulas)

# 3. Create the joint distribution
inputDistribution = ot.ComposedDistribution((Q, Ks))
inputRandomVector = ot.RandomVector(inputDistribution)
outputRandomVector = ot.CompositeRandomVector(g, inputRandomVector)

# 4. Simple Monte-Carlo sampling
samplesize = 500
outputSample = outputRandomVector.getSample(samplesize)

# 6. Plot the histogram
histoGraph = ot.HistogramFactory().build(outputSample).drawPDF()
histoGraph.setTitle("Histogramme de la hauteur")
histoGraph.setXTitle("H (m)")
histoGraph.setYTitle("Frequence")
histoGraph.setLegends([""])
View(histoGraph)
Example #13
corr[1, 0] = 0.3
distribution = ot.Normal([0, 0], [1, 2], corr)
ott.assert_almost_equal(distribution.getRoughness(),
                        compute_roughness_sampling(distribution))

distribution = ot.Epanechnikov()
ott.assert_almost_equal(distribution.getRoughness(), 3/5)

distribution = ot.Triangular()
ott.assert_almost_equal(distribution.getRoughness(), 2/3)

distribution = ot.Distribution(Quartic())
ott.assert_almost_equal(distribution.getRoughness(), 5/7)

# Testing Histogram ==> getSingularities
distribution = ot.HistogramFactory().buildAsHistogram(ot.Uniform(0, 1).getSample(100000))
ott.assert_almost_equal(distribution.getRoughness(), 1.0, 5e-4, 1e-5)
# Compute the roughness using width and height
width = distribution.getWidth()
height = distribution.getHeight()
roughness = sum([width[i] * height[i]**2 for i in range(len(height))])
ott.assert_almost_equal(distribution.getRoughness(), roughness)

# Large dimension with an (almost) independent copula
# With a small rho value, the results should be similar to the independent
# copula, but here we use the sampling method.
# This allows us to validate this sampling method.
corr = ot.CorrelationMatrix(5)
corr[1, 0] = 0.001
distribution = ot.Normal([0]*5, [1]*5, corr)
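# A plausible continuation of this check (sketch): with rho close to zero, the
# sampled roughness should be close to the independent-copula value, which for
# 5 standard normal marginals is (1 / (2 * sqrt(pi)))**5. The tolerances below
# are indicative only.
import math
ott.assert_almost_equal(distribution.getRoughness(),
                        (0.5 / math.sqrt(math.pi)) ** 5, 1e-2, 1e-5)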
Example #14
scale = [0.2, 0.2]
myCovModel = ot.ExponentialModel(scale, amplitude)
myXproc = ot.GaussianProcess(myCovModel, myMesh)
g = ot.SymbolicFunction(['x1'], ['exp(x1)'])
myDynTransform = ot.ValueFunction(g, myMesh)
myXtProcess = ot.CompositeProcess(myDynTransform, myXproc)

# %%
# Draw a field
field = myXtProcess.getRealization()
graph = field.drawMarginal(0)
view = viewer.View(graph)

# %%
# Draw values
marginal = ot.HistogramFactory().build(field.getValues())
graph = marginal.drawPDF()
view = viewer.View(graph)

# %%
# Build the transformed field through Box-Cox
myModelTransform = ot.BoxCoxFactory().build(field)
myStabilizedField = myModelTransform(field)

# %%
# Draw values
marginal = ot.HistogramFactory().build(myStabilizedField.getValues())
graph = marginal.drawPDF()
view = viewer.View(graph)
plt.show()
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View

distXgivenT = ot.Exponential()
distGamma = ot.Uniform(1.0, 2.0)
distAlpha = ot.Uniform(0.0, 0.1)
distTheta = ot.ComposedDistribution([distGamma, distAlpha])
rvTheta = ot.RandomVector(distTheta)

rvX = ot.ConditionalRandomVector(distXgivenT, rvTheta)
sampleX = rvX.getSample(1000)

histX = ot.HistogramFactory().build(sampleX)
graph = histX.drawPDF()
graph.setXTitle('x')
graph.setYTitle('pdf')

fig = plt.figure(figsize=(8, 4))
plt.suptitle(
    r"Conditional Random Vector: Exp($\gamma$, $\lambda$), $\gamma \sim \mathcal{U}(1,2)$, $\lambda \sim \mathcal{U}(0,0.1)$"
)
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)

View(graph, figure=fig, axes=[axis], add_legend=False)
#! /usr/bin/env python

import openturns as ot

ot.TESTPREAMBLE()

try:
    l = [1.0, 0.7, 1.2, 0.9]
    h = [0.5, 1.5, 3.5, 2.5]
    distribution = ot.Histogram(-1.5, l, h)
    size = 10000
    sample = distribution.getSample(size)
    factory = ot.HistogramFactory()
    estimatedDistribution = factory.build(sample)
    print("distribution=", repr(distribution))
    print("Estimated distribution=", repr(estimatedDistribution))
    estimatedDistribution = factory.build()
    print("Default distribution=", estimatedDistribution)
    estimatedHistogram = factory.buildAsHistogram(sample)
    print("Histogram          =", distribution)
    print("Estimated histogram=", estimatedHistogram)
    estimatedHistogram = factory.buildAsHistogram(sample, 0.1)
    print("Histogram          =", distribution)
    print("Estimated histogram=", estimatedHistogram)
    estimatedHistogram = factory.buildAsHistogram(sample, 15)
    print("Histogram          =", distribution)
    print("Estimated histogram=", estimatedHistogram)
    first = -2.
    width = ot.Point(5, 1.)
    estimatedHistogram = factory.buildAsHistogram(sample, first, width)
    print("Estimated histogram=", estimatedHistogram)
Example #17
    def plot_histograms(self, savefig=False, show=True):
        """Plot histograms for all determined parameters.

        Parameters
        ----------

        savefig: bool, optional
            Set to True if you want to save the figure `histograms.pdf`.
        show: bool, optional
            Whether the figure is shown.
        Notes
        -----

        Fails if values are too close to each other, i.e.
        the variance is very small.
        """
        if len(self.parameters) < 3:
            ncols = len(self.parameters)
            nrows = 1
        else:
            ncols = 3
            nrows = int(len(self.parameters) / 3)
            nrows += int(ceil((len(self.parameters) % 3) / 3))
        fig, ax = plt.subplots(nrows=nrows, ncols=ncols)
        r = 0
        c = 0
        for key in self.sampledict:
            graph = ot.HistogramFactory().build(self.sampledict[key]).drawPDF()
            graph.setTitle("Histogram for variables")
            graph.setXTitle(self.labels[key])
            if nrows == 1:
                View(graph, axes=[ax[c]], plot_kwargs={'label': "hist", 'c': 'black'})
                ymin, ymax = ax[c].get_ylim()
            else:
                View(graph, axes=[ax[r, c]], plot_kwargs={'label': "hist", 'c': 'black'})
                ymin, ymax = ax[r, c].get_ylim()
            kernel = ot.KernelSmoothing()
            graph_k = kernel.build(self.sampledict[key])
            graph_k = graph_k.drawPDF()
            graph_k.setTitle("Histogram for variables")
            graph_k.setXTitle(key)
            if nrows == 1:
                View(graph_k, axes=[ax[c]], plot_kwargs={'label': "smooth"})
                ymin1, ymax1 = ax[c].get_ylim()
                if ymax1 < ymax:
                    ax[c].set_ylim(ymin1, ymax)

            else:
                View(graph_k, axes=[ax[r, c]], plot_kwargs={'label': "smooth"})
                ymin1, ymax1 = ax[r, c].get_ylim()
                if ymax1 < ymax:
                    ax[r, c].set_ylim(ymin1, ymax)

            # jump to next ax object or next row
            c += 1
            if c == 3:
                c = 0
                r += 1
        plt.tight_layout()
        if savefig:
            plt.savefig("histograms.pdf")
        if show:
            plt.show()
Example #18
outputSample = model(inputSample)
cobwebValue = ot.VisualTest.DrawCobWeb(
    inputSample, outputSample, 2.5, 3.0, "red", False)
print("cobwebValue = ", cobwebValue)

cobwebQuantile = ot.VisualTest.DrawCobWeb(
    inputSample, outputSample, 0.7, 0.9, "red", False)
print("cobwebQuantile = ", cobwebQuantile)

# KendallPlot tests
size = 100
copula1 = ot.FrankCopula(1.5)
copula2 = ot.GumbelCopula(4.5)
sample1 = copula1.getSample(size)
sample1.setName("data 1")
sample2 = copula2.getSample(size)
sample2.setName("data 2")
kendallPlot1 = ot.VisualTest.DrawKendallPlot(sample1, copula2)
print("KendallPlot1 = ", kendallPlot1)

kendallPlot2 = ot.VisualTest.DrawKendallPlot(sample2, sample1)
print("KendallPlot2 = ", kendallPlot2)

# Clouds
sample = ot.Normal(4).getSample(200)
clouds = ot.VisualTest.DrawPairs(sample)
print("Clouds = ", clouds)
distribution = ot.ComposedDistribution([ot.HistogramFactory().build(sample.getMarginal(i)) for i in range(4)])
cloudsMarginals = ot.VisualTest.DrawPairsMarginals(sample, distribution)
print("CloudsMarginals = ", cloudsMarginals)
    inputDesign = ot.SobolIndicesExperiment(distribution,
                                            sampleSize).generate()
    outputDesign = gsobol(inputDesign, a)
    sensitivity_algorithm = ot.SaltelliSensitivityAlgorithm(
        inputDesign, outputDesign, sampleSize)
    fo = sensitivity_algorithm.getFirstOrderIndices()
    to = sensitivity_algorithm.getTotalOrderIndices()
    for j in range(d):
        sampleFirstMartinez[i, j] = fo[j]
    for j in range(d):
        sampleTotalMartinez[i, j] = to[j]

fig = pl.figure(figsize=(12, 8))
for j in range(d):
    ax = fig.add_subplot(2, 3, 1 + j)
    graph = ot.HistogramFactory().build(sampleFirstMartinez[:, j]).drawPDF()
    graph.setXTitle("S%d" % (d))
    graph.setLegends([""])
    _ = otv.View(graph, figure=fig, axes=[ax])
    ax = fig.add_subplot(2, 3, 4 + j)
    graph = ot.HistogramFactory().build(sampleTotalMartinez[:, j]).drawPDF()
    graph.setXTitle("ST%d" % (d))
    graph.setLegends([""])
    _ = otv.View(graph, figure=fig, axes=[ax])
_ = fig.suptitle("Martinez - N=%d - Repetitions = %d" %
                 (sampleSize, nrepetitions))

# Retrieve the bootstrap confidence interval for the last sample
alpha = sensitivity_algorithm.getConfidenceLevel()
foInterval = sensitivity_algorithm.getFirstOrderIndicesInterval()
foIntervalMin = foInterval.getLowerBound()
Example #20
# %%
inputRandomVector = ot.ComposedDistribution([Q, K_s, Z_v, Z_m])

# %%
# Create a Monte-Carlo sample of the output H.

# %%
nbobs = 100
inputSample = inputRandomVector.getSample(nbobs)
outputH = g(inputSample)

# %%
# Observe the distribution of the output H.

# %%
graph = ot.HistogramFactory().build(outputH).drawPDF()
view = viewer.View(graph)

# %%
# Generate the observation noise and add it to the output of the model.

# %%
sigmaObservationNoiseH = 0.1 # (m)
noiseH = ot.Normal(0.,sigmaObservationNoiseH)
sampleNoiseH = noiseH.getSample(nbobs)
Hobs = outputH + sampleNoiseH

# %%
# Plot the Y observations versus the X observations.

# %%
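# The plotting code is not shown here; a minimal sketch, assuming `ot`, `viewer`,
# `inputSample` and `Hobs` from the previous cells and taking the first input
# marginal Q as the abscissa, could be:
graph = ot.Graph("Observations", "Q (input)", "Hobs (m)", True)
graph.add(ot.Cloud(inputSample.getMarginal(0), Hobs))
view = viewer.View(graph)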
# %%
uniform.computeCDF(3.5)

# %%
# The `getSample` method generates a sample.

# %%
sample = uniform.getSample(10)
sample

# %%
# The most common way to "see" a sample is to plot the empirical histogram.

# %%
sample = uniform.getSample(1000)
graph = ot.HistogramFactory().build(sample).drawPDF()
view = viewer.View(graph)

# %%
# Multivariate distributions with or without independent copula
# -------------------------------------------------------------

# %%
# We can create multivariate distributions by two different methods:
#
# - we can combine a list of univariate marginal distributions and a copula,
# - some distributions are defined directly as multivariate distributions: `Normal`, `Dirichlet`, `Student`.
#
# Since the method based on marginals and a copula is more flexible, we illustrate this principle below.

# %%
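# The illustration itself is not shown above; a minimal sketch of the principle
# (the marginals and the copula below are illustrative choices) could be:
marginals = [ot.Normal(0.0, 1.0), ot.Uniform(-1.0, 1.0)]
copula = ot.ClaytonCopula(2.5)
multivariateDistribution = ot.ComposedDistribution(marginals, copula)
sample2d = multivariateDistribution.getSample(1000)
graph = ot.HistogramFactory().build(sample2d.getMarginal(0)).drawPDF()
view = viewer.View(graph)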
Example #22
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

ot.TESTPREAMBLE()

try:
    l = [1.0, 0.7, 1.2, 0.9]
    h = [0.5, 1.5, 3.5, 2.5]
    distribution = ot.Histogram(-1.5, l, h)
    size = 10000
    sample = distribution.getSample(size)
    factory = ot.HistogramFactory()
    estimatedDistribution = factory.build(sample)
    print("distribution=", repr(distribution))
    print("Estimated distribution=", repr(estimatedDistribution))
    estimatedDistribution = factory.build()
    print("Default distribution=", estimatedDistribution)
    estimatedHistogram = factory.buildAsHistogram(sample)
    print("Histogram          =", distribution)
    print("Estimated histogram=", estimatedHistogram)
    estimatedHistogram = factory.buildAsHistogram(sample, 0.1)
    print("Histogram          =", distribution)
    print("Estimated histogram=", estimatedHistogram)
    estimatedHistogram = factory.buildAsHistogram(sample, 15)
    print("Histogram          =", distribution)
    print("Estimated histogram=", estimatedHistogram)
    first = -2.
    width = ot.Point(5, 1.)
    estimatedHistogram = factory.buildAsHistogram(sample, first, width)
    criteriaComponent = dim - 1
    sortedSample = sample.sortAccordingToAComponent(criteriaComponent)
    quantiles = sortedSample.computeQuantilePerComponent(alpha)
    quantileValue = quantiles[criteriaComponent]
    sortedSampleCriteria = sortedSample[:,criteriaComponent]
    indices = np.where(np.array(sortedSampleCriteria.asPoint())>quantileValue)[0]
    conditionnedSortedSample = sortedSample[int(indices[0]):,selectedComponent]
    return conditionnedSortedSample


# %%
# Create a histogram for the unconditional flowrates.

# %%
numberOfBins = 10
histogram = ot.HistogramFactory().buildAsHistogram(sampleQ,numberOfBins)

# %%
# Extract the sub-sample of the input flowrates Q which leads to large values of the output H.

# %%
alpha = 0.9
criteriaComponent = 4
selectedComponent = 0
conditionnedSampleQ = computeConditionnedSample(sample,alpha,criteriaComponent,selectedComponent)

# %%
# We could as well use:
# ```
# conditionnedHistogram = ot.HistogramFactory().buildAsHistogram(conditionnedSampleQ)
# ```
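# %%
# A possible follow-up (sketch, assuming `viewer` is imported in this example):
# overlay the unconditional and conditional histograms to compare them.

# %%
conditionnedHistogram = ot.HistogramFactory().buildAsHistogram(
    conditionnedSampleQ, numberOfBins)
graph = histogram.drawPDF()
graph.add(conditionnedHistogram.drawPDF())
graph.setLegends(["Unconditional", "Conditional"])
graph.setXTitle("Q")
view = viewer.View(graph)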
Example #24
asCorrelation = True
isStationary = False
graph = cov.draw(0, 0, tmin, tmax, 128, isStationary, asCorrelation)
View(graph)

# Sample of coefficients Xi
sampleKsi = KLResult.project(outputSample)

# Each marginal is reconstructed by Gaussian kernel smoothing
# False, 0, False: no binning (no aggregation of the data into bins), number of bins, no boundary correction
nbmodes = sampleKsi.getDimension()
xi_marges = [ot.KernelSmoothing(ot.Normal(), False, 0, False).build(sampleKsi.getMarginal(i)) for i in range(nbmodes)]

# Plots of the marginal PDFs
for i in range(len(xi_marges)):
    monHisto = ot.HistogramFactory().build(sampleKsi.getMarginal(i)).drawPDF()
    monHisto.setColors(["blue"])
    graph = xi_marges[i].drawPDF()
    graph.setTitle(r"Distribution of $\xi$" + str(i))
    graph.setXTitle(r"$\xi$" + str(i))
    graph.setYTitle("PDF")
    graph.add(monHisto)
    graph.setLegends(["KS","Histogram"])
    View(graph)

# Pairs plot in the rank space
pairs = ot.Pairs(sampleKsi.rank())
pairs.setPointStyle("bullet")
View(pairs)
Example #25
import openturns as ot
from openturns.viewer import View

ot.RandomGenerator.SetSeed(0)

sample = ot.Normal(3).getSample(100)
distribution = ot.ComposedDistribution(
    [ot.HistogramFactory().build(sample.getMarginal(i)) for i in range(3)])
graph = ot.VisualTest.DrawPairsMarginals(sample, distribution)

View(graph, figure_kw={'figsize': (6.0, 6.0)})
inputDistribution = cm.inputDistribution

# %%
# Create the Monte-Carlo sample.

# %%
sampleSize = 100
inputSample = inputDistribution.getSample(sampleSize)
outputStress = g(inputSample)
outputStress[0:5]

# %%
# Plot the histogram of the output.

# %%
histoGraph = ot.HistogramFactory().build(outputStress / 1.0e6).drawPDF()
histoGraph.setTitle("Histogram of the sample stress")
histoGraph.setXTitle("Stress (MPa)")
histoGraph.setLegends([""])
view = viewer.View(histoGraph)

# %%
# Generate observation noise.

# %%
stressObservationNoiseSigma = 10.0e6  # (Pa)
noiseSigma = ot.Normal(0.0, stressObservationNoiseSigma)
sampleNoiseH = noiseSigma.getSample(sampleSize)
observedStress = outputStress + sampleNoiseH

# %%
Example #27
# Create the design of experiments
# --------------------------------

# %%
# We consider a simple Monte-Carlo sample as a design of experiments. This is why we generate an input sample using the `getSample` method of the distribution. Then we evaluate the output using the `model` function.

# %%
sampleSize_train = 20
X_train = myDistribution.getSample(sampleSize_train)
Y_train = model(X_train)

# %%
# The following figure presents the distribution of the vertical deviations Y on the training sample. We observe that the large deviations occur less often.

# %%
histo = ot.HistogramFactory().build(Y_train).drawPDF()
histo.setXTitle("Vertical deviation (cm)")
histo.setTitle("Distribution of the vertical deviation")
histo.setLegends([""])
view = viewer.View(histo)

# %%
# Create the metamodel
# --------------------

# %%
# We rely on the `H-Matrix` approximation to accelerate the evaluation.
# We change the default parameters (compression, recompression) to higher values. The model is less accurate but very fast to build and evaluate.

# %%
ot.ResourceMap.SetAsString("KrigingAlgorithm-LinearAlgebra", "HMAT")
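# %%
# The compression and recompression settings mentioned above are not shown; a
# possible sketch, assuming the standard `HMatrix-*` ResourceMap keys, is:

# %%
ot.ResourceMap.SetAsScalar("HMatrix-AssemblyEpsilon", 1e-3)
ot.ResourceMap.SetAsScalar("HMatrix-RecompressionEpsilon", 1e-4)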
Example #28
R.setDescription(["R"])
C.setDescription(["C"])
Gamma.setDescription(["Gamma"])

# 4. Create the joint distribution function
inputRandomVector = ot.ComposedDistribution([Strain, R, C, Gamma])

# 5. Create the Monte-Carlo algorithm
sampleSize = 100
inputSample = inputRandomVector.getSample(sampleSize)
#print(inputSample)
outputSigma = f(inputSample)
#print(outputSigma)

# 7. Plot the histogram
histoGraph = ot.HistogramFactory().build(outputSigma / 1.e6).drawPDF()
histoGraph.setTitle("Histogramme de la contrainte")
histoGraph.setXTitle("Stress (MPa)")
histoGraph.setYTitle("Frequence")
#histoGraph.setBoundingBox([-1,7,0,0.60])
histoGraph.setLegends([""])
View(histoGraph)

# Generate observation noise
sigmaObservationNoiseSigma = 40.e6  # (Pa)
noiseSigma = ot.Normal(0., sigmaObservationNoiseSigma)
sampleNoiseH = noiseSigma.getSample(sampleSize)
observedSigma = outputSigma + sampleNoiseH

# Create and save sample
observedSample = ot.Sample(sampleSize, 2)
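# A possible continuation (sketch): fill the two columns with the strain input and
# the observed stress, then export the result (the file name is illustrative).
for i in range(sampleSize):
    observedSample[i, 0] = inputSample[i, 0]    # strain
    observedSample[i, 1] = observedSigma[i, 0]  # observed stress (Pa)
observedSample.exportToCSVFile("observed_sample.csv")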