def plot_data_test(x_test, y_test):
    '''Plot the data (x_test,y_test) as a Curve, in dashed black'''
    graphF = ot.Curve(x_test, y_test)
    graphF.setLegend("Exact")
    graphF.setColor("black")
    graphF.setLineStyle("dashed")
    return graphF
Example #2
def drawKSDistance(sample,
                   distribution,
                   observation,
                   D,
                   distFactory,
                   delta_x=ot.Point([1.0e-6])):
    graph = ot.Graph("KS Distance = %.4f" % (D), "X", "CDF", True, "topleft")
    # Vertical line at point x
    ECDF_index = sample.computeEmpiricalCDF(observation)
    ECDF_index_shifted = sample.computeEmpiricalCDF(observation - delta_x)
    CDF_index = distribution.computeCDF(observation)
    curve = ot.Curve(
        [observation[0], observation[0], observation[0]],
        [ECDF_index, ECDF_index_shifted, CDF_index],
    )
    curve.setColor("green")
    curve.setLegend("KS Statistics")
    curve.setLineWidth(4.0 * curve.getLineWidth())
    graph.add(curve)
    # Empirical CDF
    empiricalCDF = ot.UserDefined(sample).drawCDF()
    empiricalCDF.setColors(["blue"])
    empiricalCDF.setLegends(["Empirical DF"])
    graph.add(empiricalCDF)
    #
    distname = distFactory.getClassName()
    distribution = distFactory.build(sample)
    cdf = distribution.drawCDF()
    cdf.setLegends([distname])
    graph.add(cdf)
    return graph
Example #3
def drawKSDistance(sample, distribution, observation, D, distFactory):
    graph = ot.Graph("KS Distance = %.4f" % (D), "X", "CDF", True, "topleft")
    # Thick vertical line at point x
    ECDF_x_plus = sample.computeEmpiricalCDF(observation)
    ECDF_x_minus = ECDF_x_plus - 1.0 / sample.getSize()
    CDF_index = distribution.computeCDF(observation)
    curve = ot.Curve(
        [observation[0], observation[0], observation[0]],
        [ECDF_x_plus, ECDF_x_minus, CDF_index],
    )
    curve.setLegend("KS Statistics")
    curve.setLineWidth(4.0 * curve.getLineWidth())
    graph.add(curve)
    # Empirical CDF
    empiricalCDF = ot.UserDefined(sample).drawCDF()
    empiricalCDF.setLegends(["Empirical DF"])
    graph.add(empiricalCDF)
    #
    distname = distFactory.getClassName()
    distribution = distFactory.build(sample)
    cdf = distribution.drawCDF()
    cdf.setLegends([distname])
    graph.add(cdf)
    graph.setColors(ot.Drawable.BuildDefaultPalette(3))
    return graph
Example #4
def myPolynomialCurveFittingGraph(total_degree, x_train, y_train):
    """Returns the graphics for a polynomial curve fitting 
    with given total degree"""
    responseSurface, basis = myPolynomialDataFitting(total_degree, x_train,
                                                     y_train)
    # Graphics
    n_test = 100
    x_test = linearSample(0, 1, n_test)
    ypredicted_test = responseSurface(basis(x_test))
    # Graphics
    graph = ot.Graph("Polynomial curve fitting", "x", "y", True, "topright")
    # The "unknown" function
    curve = g.draw(0, 1)
    curve.setColors(["green"])
    graph.add(curve)
    # Training set
    cloud = ot.Cloud(x_train, y_train)
    cloud.setPointStyle("circle")
    cloud.setLegend("N=%d" % (x_train.getSize()))
    graph.add(cloud)
    # Predictions
    curve = ot.Curve(x_test, ypredicted_test)
    curve.setLegend("Polynomial Degree = %d" % (total_degree))
    curve.setColor("red")
    graph.add(curve)
    return graph
def plotMyBasicKriging(krigResult, xMin, xMax, X, Y, level = 0.95):
    '''
    Given a kriging result, plot the data, the kriging metamodel 
    and a confidence interval.
    '''
    samplesize = X.getSize()
    meta = krigResult.getMetaModel()
    graphKriging = meta.draw(xMin, xMax)
    graphKriging.setLegends(["Kriging"])
    # Create a grid of points and evaluate the function and the kriging
    nbpoints = 50
    xGrid = linearSample(xMin,xMax,nbpoints)
    yFunction = g(xGrid)
    yKrig = meta(xGrid)
    # Compute the conditional covariance
    epsilon = ot.Point(nbpoints,1.e-8)
    conditionalVariance = krigResult.getConditionalMarginalVariance(xGrid)+epsilon
    conditionalVarianceSample = ot.Sample([[cv] for cv in conditionalVariance])
    conditionalSigma = sqrt(conditionalVarianceSample)
    # Compute the quantile of the Normal distribution
    alpha = 1-(1-level)/2
    quantileAlpha = ot.DistFunc.qNormal(alpha)
    # Graphics of the bounds
    epsilon = 1.e-8
    dataLower = [yKrig[i,0] - quantileAlpha * conditionalSigma[i,0] for i in range(nbpoints)]
    dataUpper = [yKrig[i,0] + quantileAlpha * conditionalSigma[i,0] for i in range(nbpoints)]
    # Coordinates of the vertices of the Polygons
    vLow = [[xGrid[i,0],dataLower[i]] for i in range(nbpoints)] 
    vUp = [[xGrid[i,0],dataUpper[i]] for i in range(nbpoints)]
    # Compute the Polygon graphics
    boundsPoly = plot_kriging_bounds(vLow,vUp,nbpoints)
    boundsPoly.setLegend("95% bounds")
    # Validate the kriging metamodel
    mmv = ot.MetaModelValidation(xGrid, yFunction, meta)
    Q2 = mmv.computePredictivityFactor()[0]
    # Plot the function
    graphFonction = ot.Curve(xGrid,yFunction)
    graphFonction.setLineStyle("dashed")
    graphFonction.setColor("magenta")
    graphFonction.setLineWidth(2)
    graphFonction.setLegend("Function")
    # Draw the X and Y observed
    cloudDOE = ot.Cloud(X, Y)
    cloudDOE.setPointStyle("circle")
    cloudDOE.setColor("red")
    cloudDOE.setLegend("Data")
    # Assemble the graphics
    graph = ot.Graph()
    graph.add(boundsPoly)
    graph.add(graphFonction)
    graph.add(cloudDOE)
    graph.add(graphKriging)
    graph.setLegendPosition("bottomright")
    graph.setAxes(True)
    graph.setGrid(True)
    graph.setTitle("Size = %d, Q2=%.2f%%" % (samplesize,100*Q2))
    graph.setXTitle("X")
    graph.setYTitle("Y")
    return graph
def plot_exact_model():
    graph = ot.Graph('', 'x', '', True, '')
    y_test = model(x_test)
    curveModel = ot.Curve(x_test, y_test)
    curveModel.setLineStyle("solid")
    curveModel.setColor("black")
    graph.add(curveModel)
    cloud = ot.Cloud(Xtrain, Ytrain)
    cloud.setColor("black")
    cloud.setPointStyle("fsquare")
    graph.add(cloud)
    return graph
Example #7
def drawPDFAndInterval1D(distribution, interval, alpha):
    '''
    Draw the PDF of the distribution and the lower and upper bounds of an interval.
    '''
    xmin = interval.getLowerBound()[0]
    xmax = interval.getUpperBound()[0]
    graph = distribution.drawPDF()
    yvalue = distribution.computePDF(xmin)
    curve = ot.Curve([[xmin,0.],[xmin,yvalue],[xmax,yvalue],[xmax,0.]])
    curve.setColor("black")
    graph.add(curve)
    graph.setTitle("%.2f%% of the distribution, lower bound = %.3f, upper bound = %.3f" % (100*alpha, xmin,xmax))
    return graph
Example #8
def drawKL(scaledKL, KLev, mesh, title="Scaled KL modes"):
    graph_modes = scaledKL.drawMarginal()
    graph_modes.setTitle(title + " scaled KL modes")
    graph_modes.setXTitle('$x$')
    graph_modes.setYTitle(r'$\sqrt{\lambda_i}\phi_i$')
    data_ev = [[i, KLev[i]] for i in range(scaledKL.getSize())]
    graph_ev = ot.Graph()
    graph_ev.add(ot.Curve(data_ev))
    graph_ev.add(ot.Cloud(data_ev))
    graph_ev.setTitle(title + " KL eigenvalues")
    graph_ev.setXTitle('$k$')
    graph_ev.setYTitle(r'$\lambda_i$')
    graph_ev.setAxes(True)
    graph_ev.setGrid(True)
    graph_ev.setLogScale(2)
    bb = graph_ev.getBoundingBox()
    lower = bb.getLowerBound()
    lower[1] = 1.0e-7
    bb = ot.Interval(lower, bb.getUpperBound())
    graph_ev.setBoundingBox(bb)
    return graph_modes, graph_ev
Example #9
def drawKSDistance(sample, distribution, x, D, distFactory):
    graph = ot.Graph("KS Distance = %.4f" % (D), "X", "CDF", True, "topleft")
    # Vertical line at point x
    ECDF_index = sample.computeEmpiricalCDF([x])
    CDF_index = distribution.computeCDF(x)
    curve = ot.Curve([x, x], [ECDF_index, CDF_index])
    curve.setColor("green")
    curve.setLegend("KS Statistics")
    curve.setLineWidth(4. * curve.getLineWidth())
    graph.add(curve)
    # Empirical CDF
    empiricalCDF = ot.UserDefined(sample).drawCDF()
    empiricalCDF.setColors(["blue"])
    empiricalCDF.setLegends(["Empirical DF"])
    graph.add(empiricalCDF)
    #
    distname = distFactory.getClassName()
    distribution = distFactory.build(sample)
    cdf = distribution.drawCDF()
    cdf.setLegends([distname])
    graph.add(cdf)
    return graph
    def _drawObservationsVsPredictions1Dimension(self, outputObservations,
                                                 outputAtPrior,
                                                 outputAtPosterior):
        """
        Plot the model output versus the output observations,
        before and after calibration.
        Can manage only 1D samples.
        """
        yDim = outputObservations.getDimension()
        ydescription = outputObservations.getDescription()
        xlabel = "%s Observations" % (ydescription[0])
        ylabel = "%s Predictions" % (ydescription[0])
        graph = ot.Graph("", xlabel, ylabel, True, "topleft")
        # Plot the diagonal
        if yDim == 1:
            curve = ot.Curve(outputObservations, outputObservations)
            curve.setColor(self.observationColor)
            graph.add(curve)
        else:
            raise TypeError('Output observations are not 1D.')
        # Plot the predictions before calibration
        yPriorDim = outputAtPrior.getDimension()
        if yPriorDim == 1:
            cloud = ot.Cloud(outputObservations, outputAtPrior)
            cloud.setColor(self.priorColor)
            cloud.setLegend("Prior")
            graph.add(cloud)
        else:
            raise TypeError('Output prior predictions are not 1D.')
        # Plot the predictions after calibration
        yPosteriorDim = outputAtPosterior.getDimension()
        if yPosteriorDim == 1:
            cloud = ot.Cloud(outputObservations, outputAtPosterior)
            cloud.setColor(self.posteriorColor)
            cloud.setLegend("Posterior")
            graph.add(cloud)
        else:
            raise TypeError('Output posterior predictions are not 1D.')
        return graph
phi = ot.ParametricFunction(f, [2], [0.0])
solver = ot.RungeKutta(phi)

initialState = [2.0, 2.0]
nt = 47
dt = 0.1
timeGrid = ot.RegularGrid(0.0, dt, nt)
result = solver.solve(initialState, timeGrid)
xMin = result.getMin()
xMax = result.getMax()
delta = 0.2 * (xMax - xMin)
mesh = ot.IntervalMesher([12] * 2).build(
    ot.Interval(xMin - delta, xMax + delta))
field = ot.Field(mesh, phi(mesh.getVertices()))
ot.ResourceMap.SetAsScalar("Field-ArrowScaling", 0.1)
graph = field.draw()
cloud = ot.Cloud(mesh.getVertices())
cloud.setColor("black")
graph.add(cloud)
curve = ot.Curve(result)
curve.setColor("red")
curve.setLineWidth(2)
graph.add(curve)

fig = plt.figure()
ax = fig.add_subplot(111)
View(graph, figure=fig)
plt.suptitle("Lotka-Volterra ODE system")
plt.xlabel(r'$y_0$')
plt.ylabel(r'$y_1$')
plt.grid()
# Evaluate the covariance function at each time step
# Careful: if the model was estimated from a time series, the time grid has changed
for i in range(N):
    tau = tgrid.getValue(i)
    cov = estimatedModel_PS(tau)

# %%
# Drawing...
sampleValueEstimated = ot.Sample(N, 1)
sampleValueModel = ot.Sample(N, 1)
for i in range(N):
    t = tgrid.getValue(i)
    # only the value at s = t0 is kept, which gives the curve C(0, t)
    for j in range(i + 1):
        s = tgrid.getValue(j)
        estimatedValue = estimatedModel_PS(t, s)
        modelValue = covmodel(t, s)
        if j == 0:
            sampleValueEstimated[i, 0] = estimatedValue[0, 0]
            sampleValueModel[i, 0] = modelValue[0, 0]
sampleT = tgrid.getVertices()
graph = ot.Graph('Covariance estimation', 'time', 'Covariance value C(0,t)', True)
curveEstimated = ot.Curve(sampleT, sampleValueEstimated, 'Estimated model')
graph.add(curveEstimated)
curveModel = ot.Curve(sampleT, sampleValueModel, 'Exact model')
curveModel.setColor('red')
graph.add(curveModel)
graph.setLegendPosition('topright')
view = viewer.View(graph)
plt.show()
Example #13
exceedanceNumbers = ot.Sample()
probabilityEstimateSample = ot.Sample()
for i in range(size):
    point = distNormal.getRealization()
    iterThreshold.increment(point)
    numberOfExceedances = iterThreshold.getThresholdExceedance()[0]
    exceedanceNumbers.add([numberOfExceedances])
    probabilityEstimate = numberOfExceedances / iterThreshold.getIterationNumber(
    )
    probabilityEstimateSample.add([probabilityEstimate])

# %%
# We display the evolution of the number of exceedances.

# %%
curve = ot.Curve(exceedanceNumbers)
curve.setLegend("number of exceedance")
#
graph = ot.Graph(
    "Evolution of the number of exceedances",
    "iteration nb",
    "number of exceedances",
    True,
)
graph.add(curve)
graph.setLegends(["number of exceedances"])
graph.setLegendPosition("bottomright")
view = otv.View(graph)

# %%
# The following plot shows that the probability of exceeding the threshold converges.
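# A minimal sketch (using the probabilityEstimateSample built above): draw the
# running probability estimate to visualize this convergence.
curve = ot.Curve(probabilityEstimateSample)
curve.setLegend("probability estimate")
graph = ot.Graph(
    "Evolution of the probability estimate",
    "iteration number",
    "probability estimate",
    True,
)
graph.add(curve)
graph.setLegendPosition("topright")
view = otv.View(graph)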
def plot_data_kriging(x_test, y_test_MM):
    '''Plots (x_test,y_test_MM) from the metamodel as a Curve, in blue'''
    graphK = ot.Curve(x_test, y_test_MM)
    graphK.setColor("blue")
    graphK.setLegend("Kriging")
    return graphK
Example #15
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
filteringWindow = ot.Hanning()
numPoints = 512
data = ot.Sample(numPoints, 2)
for i in range(numPoints):
    x = -0.1 + (1.2 * i) / (numPoints - 1.0)
    data[i, 0] = x
    data[i, 1] = filteringWindow(x)
graph = ot.Graph()
graph.setXTitle('$tau$')
graph.setYTitle('W')
graph.add(ot.Curve(data))
graph.setColors(['red'])
fig = plt.figure(figsize=(10, 4))
plt.suptitle(str(filteringWindow))
filtering_axis = fig.add_subplot(111)
View(graph, figure=fig, axes=[filtering_axis], add_legend=False)
Example #16
N = 10000
distribution = ot.Normal(2)
X = ot.RandomVector(distribution)
f = ot.SymbolicFunction(["x", "y"], ["x^2+y^2"])
Y = ot.RandomVector(f, X)
event = ot.Event(Y, ot.Greater(), threshold)
algo = ot.ProbabilitySimulationAlgorithm(event, ot.MonteCarloExperiment(1))
algo.setConvergenceStrategy(ot.Full())
algo.setMaximumOuterSampling(N)
algo.setMaximumCoefficientOfVariation(0.0)
algo.setMaximumStandardDeviation(0.0)
algo.run()
pRef = ot.ChiSquare(2).computeComplementaryCDF(threshold)

# Draw convergence
graph = algo.drawProbabilityConvergence()
graph.setXMargin(0.0)
graph.setLogScale(1)
graph.setLegendPosition("topright")
graph.setXTitle(r"n")
graph.setYTitle(r"$\hat{p}_n$")
graph.setTitle("Monte Carlo simulation - convergence history")
ref = ot.Curve([[1, pRef], [N, pRef]])
ref.setColor("black")
ref.setLineStyle("dashed")
ref.setLegend(r"$p_{ref}$")
graph.add(ref)
view = View(graph, (800, 600))
view.save("../plot_monte_carlo.png")
view.close()
    def drawOutlierTrajectories(self,
                                drawInliers=False,
                                discreteMean=False,
                                bounds=True):
        """Plot outlier trajectories from the :attr:`ProcessSample`.

        :param bool drawInliers: Whether to draw inliers or not.
        :param bool discreteMean: Whether to compute the mean per vertex or
          by minimal volume levelset using the distribution.
        :param bool bounds: Whether to plot bounds.
        :return: OpenTURNS graph object.
        :rtype: :class:`openturns.Graph`
        """
        graph = ot.Graph(
            "Outliers at alpha=%.2f" % (self.densityPlot.outlierAlpha), '', '',
            True, 'topright')

        # Get the mesh
        mesh = self.processSample.getMesh()
        t = np.ravel(mesh.getVertices())

        # Plot outlier trajectories
        outlier_samples = np.array(self.getOutlierSamples())

        if outlier_samples.size != 0:
            for outlier_sample in outlier_samples.T:
                curve = ot.Curve(t, outlier_sample)
                curve.setColor('red')
                graph.add(curve)

        # Plot inlier trajectories
        inlier_samples = np.array(self.getInlierSamples())

        if drawInliers:
            for inlier_sample in inlier_samples.T:
                curve = ot.Curve(t, inlier_sample)
                curve.setColor('blue')
                graph.add(curve)

        # Plot inlier bounds

        def fill_between_(lower, upper, legend):
            """Draw a shaded area between two curves."""
            disc = len(lower)
            palette = ot.Drawable.BuildDefaultPalette(2)[1]
            poly_data = [[lower[i], lower[i + 1], upper[i + 1], upper[i]]
                         for i in range(disc - 1)]

            polygon = [
                ot.Polygon(poly_data[i], palette, palette)
                for i in range(disc - 1)
            ]
            bounds_poly = ot.PolygonArray(polygon)
            bounds_poly.setLegend(legend)

            return bounds_poly

        if bounds:
            inlier_min = list(zip(t, np.min(inlier_samples, axis=1)))
            inlier_max = list(zip(t, np.max(inlier_samples, axis=1)))

            bounds = fill_between_(
                inlier_min, inlier_max,
                "Confidence interval at alpha=%.2f" % (self.outlierAlpha))
            graph.add(bounds)

        # Plot central curve
        if discreteMean:
            central_field = self.processSample.computeMean()
        else:
            central_field = self.processSample[self.densityPlot.idx_mode]

        curve = ot.Curve(t[:, None], central_field.getValues(),
                         'Central curve')
        curve.setColor('black')
        graph.add(curve)

        return graph
Example #18
    # Show axes as prescribed by getAxes()
    graph = ot.Normal().drawPDF()
    graph.setAxes(False)
    view = View(graph)
    view.ShowAll(block=True)

    # test _repr_png_
    png = graph._repr_png_()
    assert (b'PNG' in png[:10])

    # BuildDefaultPalette, BuildTableauPalette
    ncurves = 5
    graph = ot.Graph("BuildPalette", "X", "Y", True, "topright")
    n = 20
    x = ot.Sample([[i] for i in range(n)])
    for i in range(ncurves):
        y = ot.Normal().getSample(n)
        curve = ot.Curve(x, y)
        curve.setLegend("Curve #%d" % (i))
        graph.add(curve)
    palette = ot.Drawable.BuildDefaultPalette(ncurves)
    graph.setColors(palette)
    view = View(graph)
    palette = ot.Drawable.BuildTableauPalette(ncurves)
    graph.setColors(palette)
    view = View(graph)

except:
    traceback.print_exc()
    os._exit(1)
Example #19
# %%
# In order to compute the kriging error, we can consider the conditional variance. The `getConditionalCovariance` method returns the covariance matrix `covGrid` evaluated at each point of the given sample. We can then use the diagonal coefficients to get the marginal conditional kriging variance. Since this is a variance, we take its square root to obtain the standard deviation. However, some diagonal coefficients are very close to zero or even slightly negative, which makes the sqrt function raise an exception. This is why we add a small epsilon to the diagonal (a nugget factor), which prevents the issue.

# %%
sqrt = ot.SymbolicFunction(["x"], ["sqrt(x)"])
epsilon = ot.Sample(n_test, [1.e-8])
conditionalVariance = result.getConditionalMarginalVariance(x_test) + epsilon
conditionalSigma = sqrt(conditionalVariance)

# %%
# The following figure presents the conditional standard deviation depending on :math:`x`.

# %%
graph = ot.Graph('Conditional standard deviation', 'x',
                 'Conditional standard deviation', True, '')
curve = ot.Curve(x_test, conditionalSigma)
graph.add(curve)
view = viewer.View(graph)

# %%
# We now compute the bounds of the confidence interval. For this purpose we define a small function
# `computeBoundsConfidenceInterval` :


# %%
def computeBoundsConfidenceInterval(quantileAlpha):
    dataLower = [[y_test_MM[i, 0] - quantileAlpha * conditionalSigma[i, 0]]
                 for i in range(n_test)]
    dataUpper = [[y_test_MM[i, 0] + quantileAlpha * conditionalSigma[i, 0]]
                 for i in range(n_test)]
    dataLower = ot.Sample(dataLower)
    dataUpper = ot.Sample(dataUpper)
    return dataLower, dataUpper
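# %%
# A minimal usage sketch (assuming a 95% confidence level and that the function
# above returns both bounds): compute the Normal quantile and evaluate the
# bounds of the interval on the test sample.
alpha = 0.95
quantileAlpha = ot.DistFunc.qNormal(1.0 - (1.0 - alpha) / 2.0)
dataLower, dataUpper = computeBoundsConfidenceInterval(quantileAlpha)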
Example #20
n_test = 50
x_test = linearSample(0, 1, n_test)
y_test = responseSurface(basis(x_test))

# %%
graph = ot.Graph("Polynomial curve fitting", "x", "y", True, "topright")
# The "unknown" function
curve = g.draw(0, 1)
curve.setColors(["green"])
graph.add(curve)
# Training set
cloud = ot.Cloud(x_train, y_train)
cloud.setPointStyle("circle")
graph.add(cloud)
# Predictions
curve = ot.Curve(x_test, y_test)
curve.setLegend("Polynomial Degree = %d" % (total_degree))
curve.setColor("red")
graph.add(curve)
view = otv.View(graph)

# %%
# For each observation in the training set, the error is the vertical distance between the model and the observation.

# %%
graph = ot.Graph(
    "Least squares minimizes the sum of the squares of the vertical bars",
    "x",
    "y",
    True,
    "topright",
Example #21
print("zi1D = ", zi1D)
print("zi2D = ", zi2D)

# %%
# We can represent the boundary of the event in the standard space: it is the composition of the
# hyperbola :math:`h : x \mapsto 10/x` and the inverse transform :math:`T_1^{-1}` defined by
# :math:`inverseTransformX1`.
failureBoundaryPhysicalSpace = ot.SymbolicFunction(['x'], ['10.0 / x'])
failureBoundaryStandardSpace = ot.ComposedFunction(
    failureBoundaryPhysicalSpace, inverseTransformX1)
x = np.linspace(1.1, 5.0, 100)
cx = np.array([failureBoundaryStandardSpace([xi])[0] for xi in x])

graphStandardSpace = ot.Graph('Failure event in the standard space', r'$u_1$',
                              r'$u_2$', True, '')
curveCX = ot.Curve(x, cx, r'Boundary of the event $\partial \mathcal{D}$')
curveCX.setLineStyle("solid")
curveCX.setColor("blue")
graphStandardSpace.add(curveCX)

# %%
# We add the origin to the previous graph.
cloud = ot.Cloud(ot.Point([0.0]), ot.Point([0.0]))
cloud.setColor("black")
cloud.setPointStyle("fcircle")
cloud.setLegend("origin")
graphStandardSpace.add(cloud)
graphStandardSpace.setGrid(True)
graphStandardSpace.setLegendPosition("bottomright")

# Some annotation
data = ot.Sample(n,2)
data[:,0] = weibullSample
data[:,1] = uniformSample
data.setDescription(["x","p"])

# %%
sample = ot.Sample(data.sort())
sample[0:5,:]

# %%
weibullSample = sample[:,0]
uniformSample = sample[:,1]

# %%
graph = ot.Graph("Weibull alpha=%s, beta=%s, n=%s" % (alpha,beta,n),"x","U",True)
# Add the CDF plot
curve = W.drawCDF()
curve.setColors(["blue"])
graph.add(curve)
# Plot dashed horizontal & vertical lines
for i in range(n):
    curve = ot.Curve([0.,weibullSample[i,0], weibullSample[i,0]],[uniformSample[i,0],uniformSample[i,0], 0.])
    curve.setColor("red")
    curve.setLineStyle("dashed")
    graph.add(curve)
view = viewer.View(graph)
plt.show()

# %%
# This graph must be read from the U axis on the left, across to the blue curve (the CDF), and down to the X axis. The horizontal segments on the U axis follow the uniform distribution, while the vertical segments on the X axis follow the Weibull distribution.
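# %%
# A minimal numerical check of this reading (using W, weibullSample and n from
# above): the probability integral transform W.computeCDF(x) of the Weibull
# sample should be uniformly distributed on [0, 1].
transformed = ot.Sample([[W.computeCDF(weibullSample[i, 0])] for i in range(n)])
test_result = ot.FittingTest.Kolmogorov(transformed, ot.Uniform(0.0, 1.0))
print("Kolmogorov p-value:", test_result.getPValue())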
Example #23
# Create the spectral model:
spectralModel = ot.UserDefinedSpectralModel(fgrid, coll)

# Get the spectral density function computed at first frequency values
firstFrequency = fgrid.getStart()
frequencyStep = fgrid.getStep()
firstHermitian = spectralModel(firstFrequency)

# Get the spectral function at firstFrequency + 0.3 * frequencyStep
spectralModel(firstFrequency + 0.3 * frequencyStep)

# %%
# Draw the spectral density

# Create the curve of the spectral function
x = ot.Sample(N, 2)
for k in range(N):
    frequency = fgrid.getValue(k)
    x[k, 0] = frequency
    value = spectralModel(frequency)
    x[k, 1] = value[0, 0].real

# Create the graph
graph = ot.Graph('Spectral user-defined model', 'Frequency',
                 'Spectral density value', True)
curve = ot.Curve(x, 'UserSpectral')
graph.add(curve)
graph.setLegendPosition('topright')
view = viewer.View(graph)
plt.show()
Example #24
# On the right, the conditional kriging variance
graph = ot.Graph("", "x", "Conditional kriging variance", True, '')
# Sample for the data
sample = ot.Sample(n_pt, 2)
sample[:, 0] = x
cloud = ot.Cloud(sample)
cloud.setColor("red")
graph.add(cloud)
# Sample for the variance
sample = ot.Sample(n_pts_plot, 2)
sample[:, 0] = x_plot
variance = [[krigingResult.getConditionalCovariance(xx)[0, 0]]
            for xx in x_plot]
sample[:, 1] = variance
curve = ot.Curve(sample)
curve.setColor("green")
graph.add(curve)
View(graph, axes=[ax2])

fig.suptitle("Kriging result")

# %%
# Display the confidence interval
# -------------------------------

# %%
# sphinx_gallery_thumbnail_number = 3
level = 0.95
quantile = ot.Normal().computeQuantile((1 - level) / 2)[0]
borne_sup = krigingMeta(x_plot) + quantile * np.sqrt(variance)
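# %%
# A minimal sketch (using krigingMeta, x_plot, variance, quantile and level
# from above): build the lower and upper bounds of the confidence interval and
# draw it as a shaded polygon together with the kriging prediction.
mean_plot = np.array(krigingMeta(x_plot)).flatten()
half_width = abs(quantile) * np.sqrt(np.array(variance)).flatten()
x_arr = np.array(x_plot).flatten()
lower = mean_plot - half_width
upper = mean_plot + half_width
# polygon vertices: along the lower bound, then back along the upper bound
vertices = [[float(x_arr[i]), float(lower[i])] for i in range(n_pts_plot)]
vertices += [[float(x_arr[i]), float(upper[i])]
             for i in range(n_pts_plot - 1, -1, -1)]
interval_poly = ot.Polygon(vertices, "lightblue", "lightblue")
interval_poly.setLegend("%.0f%% confidence interval" % (100 * level))
graph_ci = ot.Graph("Kriging confidence interval", "x", "y", True, "topright")
graph_ci.add(interval_poly)
graph_ci.add(ot.Curve(x_plot, krigingMeta(x_plot)))
View(graph_ci)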
algo = ot.KrigingAlgorithm(myTransform(Xtrain), Ytrain, covarianceModel, basis)

# %%
# We can run the algorithm and store the result :
algo.run()
result = algo.getResult()

# %%
# The metamodel is the following :class:`~openturns.ComposedFunction` :
metamodel = ot.ComposedFunction(result.getMetaModel(), myTransform)

# %%
# We can draw the metamodel and the exact model on the same graph.
graph = plot_exact_model()
y_test = metamodel(x_test)
curve = ot.Curve(x_test, y_test)
curve.setLineStyle("dashed")
curve.setColor("red")
graph.add(curve)
graph.setLegends(['exact model', 'training data', 'kriging metamodel'])
graph.setLegendPosition("bottom")
graph.setTitle('1D Kriging : exact model and metamodel')
view = otv.View(graph)

# %%
# We can retrieve the calibrated trend coefficient :
c0 = result.getTrendCoefficients()
print("The trend is the curve m(x) = %.6e" % c0[0][0])

# %%
# We also pay attention to the trained covariance model and observe the values of its optimized parameters.
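# A minimal sketch: the optimized covariance model and its parameters can be
# retrieved from the kriging result.
print(result.getCovarianceModel())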
Example #26
cloud1.setLineWidth(2)
graph.add(cloud1)

size = 150 - size

# Data
x2 = ot.Uniform(1.0, 9.0).getSample(size)
y2 = ot.Uniform(0.0, 120.0).getSample(size)
# Merge with previous data
x = ot.Sample(x1)
y = ot.Sample(y1)
x.add(x2)
y.add(y2)
# Quadratic model
algo = ot.QuadraticLeastSquares(x, y)
algo.run()
quadratic = algo.getMetaModel()

graph = ot.Graph("Null Spearman coefficient", "u", "v", True, "")
graph.add(cloud1)
cloud2 = ot.Cloud(x2, y2)
cloud2.setPointStyle("square")
cloud2.setColor("blue")
cloud2.setLineWidth(2)
graph.add(cloud2)
curve2 = ot.Curve(x, quadratic(x))
curve2.setColor("black")
curve2.setLineWidth(2)
graph.add(curve2)

View(graph)
Example #27
# With the model, we want to compare values:
# we compare the estimated values with the theoretical values.
plotSample = ot.Sample(frequencyGrid.getN(), 3)

# Loop of comparison ==> data are saved in plotSample
for k in range(frequencyGrid.getN()):
    freq = frequencyGrid.getStart() + k * frequencyGrid.getStep()
    plotSample[k, 0] = freq
    plotSample[k, 1] = abs(estimatedModel_PS(freq)[0, 0])
    plotSample[k, 2] = abs(model(freq)[0, 0])

# Some cosmetics: labels, legend position, ...
graph = ot.Graph("Estimated spectral function - Validation", "Frequency",
                 "Spectral density function", True, "topright", 1.0,
                 ot.GraphImplementation.LOGY)

# The first curve is the estimated density as a function of frequency
curve1 = ot.Curve(plotSample.getMarginal([0, 1]))
curve1.setColor('blue')
curve1.setLegend('estimated model')

# The second curve is the theoretical density as a function of frequency
curve2 = ot.Curve(plotSample.getMarginal([0, 2]))
curve2.setColor('red')
curve2.setLegend('Cauchy model')

graph.add(curve1)
graph.add(curve2)
view = viewer.View(graph)
plt.show()
Example #28
    def getResiduals(self):
        theGraph = ot.Graph('Residuals', 'varying dimension', 'residual', True, '')
        theCurve = ot.Curve(list(range(len(self.__residuals__))),
                            self.__residuals__, 'residuals')
        theGraph.add(theCurve)
        ot.Show(theGraph)
    plotSample[k, 1] = abs(myEstimatedModel_PS(freq)[0, 0])
    plotSample[k, 2] = abs(model(freq)[0, 0])


# Graph section
# We build two curves, each one a function of the frequency values
ind = ot.Indices(2)
ind.fill()

# Some cosmetics: labels, legend position, ...
graph = ot.Graph("Spectral model estimation", "Frequency",
                 "Spectral density function", True, "topright", 1.0, ot.GraphImplementation.LOGY)

# The first curve is the estimated density as a function of frequency
curve1 = ot.Curve(plotSample.getMarginal(ind))
curve1.setColor('blue')
curve1.setLegend('estimated model')

# The second curve is the theoretical density as a function of frequency
ind[1] = 2
curve2 = ot.Curve(plotSample.getMarginal(ind))
curve2.setColor('red')
curve2.setLegend('Cauchy model')

graph.add(curve1)
graph.add(curve2)

fig = plt.figure(figsize=(10, 4))
graph_axis = fig.add_subplot(111)
view = View(graph, figure=fig, axes=[graph_axis], add_legend=False)
Example #30
# %%
# Create the covariance model
covmodel = ot.UserDefinedStationaryCovarianceModel(mesh, coll)

# One vertex of the mesh
tau = 1.5

# Get the covariance function computed at the vertex tau
covmodel(tau)

# %%
# Graph of the covariance function
x = ot.Sample(N, 2)
for k in range(N):
    t = mesh.getValue(k)
    x[k, 0] = t
    value = covmodel(t)
    x[k, 1] = value[0, 0]

# Create the curve of the covariance function
curve = ot.Curve(x, 'User Model')

# Create the graph
myGraph = ot.Graph('User covariance model', 'Time',
                   'Covariance function', True)
myGraph.add(curve)
myGraph.setLegendPosition('topright')
view = viewer.View(myGraph)
plt.show()