Example #1
    def drawSample(self, inputSample, outputSample):
        """
        Draw the sample of an event.

        The points inside and outside the event are colored.

        Parameters
        ----------
        inputSample : ot.Sample
            The input 2D sample.

        outputSample : ot.Sample
            The output 1D sample.

        Returns
        -------
        graph : ot.Graph
            The graph of the sample, with points colored according to
            whether they fall inside or outside the event.

        """
        if inputSample.getDimension() != 2:
            raise ValueError("The input dimension of the input sample "
                             "is equal to %d but should be 2." %
                             (inputSample.getDimension()))
        #
        threshold = self.event.getThreshold()
        g = self.event.getFunction()
        operator = self.event.getOperator()
        #
        sampleSize = outputSample.getSize()
        insideIndices = []
        outsideIndices = []
        for i in range(sampleSize):
            y = outputSample[i, 0]
            isInside = operator(y, threshold)
            if isInside:
                insideIndices.append(i)
            else:
                outsideIndices.append(i)
        #
        insideSample = ot.Sample([inputSample[i] for i in insideIndices])
        outsideSample = ot.Sample([inputSample[i] for i in outsideIndices])
        #
        description = g.getInputDescription()
        title = "Points X s.t. g(X) %s %s" % (operator, threshold)
        graph = ot.Graph(title, description[0], description[1], True, "")
        if len(insideIndices) > 0:
            cloud = ot.Cloud(insideSample, self.insideEventPointColor,
                             "fsquare", "In")
            graph.add(cloud)
        if len(outsideIndices) > 0:
            cloud = ot.Cloud(outsideSample, self.outsideEventPointColor,
                             "fsquare", "Out")
            graph.add(cloud)
        graph.setLegendPosition("topright")
        return graph
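The accessors used above (getFunction, getOperator, getThreshold) match those of an OpenTURNS threshold event. A minimal sketch, with assumed names and values, of how the event and the two samples could be built:
import openturns as ot
# Hypothetical event: g(X) = x1 + x2 exceeds 1.0, with X a standard bivariate Normal
g = ot.SymbolicFunction(["x1", "x2"], ["x1 + x2"])
X = ot.RandomVector(ot.Normal(2))
Y = ot.CompositeRandomVector(g, X)
event = ot.ThresholdEvent(Y, ot.Greater(), 1.0)
inputSample = ot.Normal(2).getSample(100)
outputSample = g(inputSample)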
Example #2
def draw(dist, Y):
    g = ot.Graph()
    g.setAxes(True)
    g.setGrid(True)
    c = ot.Cloud(dist.getSample(10000))
    c.setColor("red")
    c.setPointStyle("bullet")
    g.add(c)
    c = ot.Cloud(Y)
    c.setColor("black")
    c.setPointStyle("bullet")
    g.add(c)
    g.setBoundingBox(ot.Interval(
        Y.getMin()-0.5*Y.computeRange(), Y.getMax()+0.5*Y.computeRange()))
    return g
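A hedged usage sketch for the draw helper above, assuming the goal is to compare a fitted distribution with the data cloud Y:
import openturns as ot
Y = ot.Normal(2).getSample(200)      # data, assumed bidimensional
dist = ot.NormalFactory().build(Y)   # fitted distribution
graph = draw(dist, Y)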
Example #3
def myPolynomialCurveFittingGraph(total_degree, x_train, y_train):
    """Returns the graphics for a polynomial curve fitting 
    with given total degree"""
    responseSurface, basis = myPolynomialDataFitting(total_degree, x_train,
                                                     y_train)
    # Evaluate the metamodel on a test grid
    n_test = 100
    x_test = linearSample(0, 1, n_test)
    ypredicted_test = responseSurface(basis(x_test))
    # Graphics
    graph = ot.Graph("Polynomial curve fitting", "x", "y", True, "topright")
    # The "unknown" function
    curve = g.draw(0, 1)
    curve.setColors(["green"])
    graph.add(curve)
    # Training set
    cloud = ot.Cloud(x_train, y_train)
    cloud.setPointStyle("circle")
    cloud.setLegend("N=%d" % (x_train.getSize()))
    graph.add(cloud)
    # Predictions
    curve = ot.Curve(x_test, ypredicted_test)
    curve.setLegend("Polynomial Degree = %d" % (total_degree))
    curve.setColor("red")
    graph.add(curve)
    return graph
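The helper linearSample is not shown (it is also used in the kriging example further down); a plausible definition, assuming it returns a regular 1D grid as an ot.Sample:
import openturns as ot

def linearSample(xmin, xmax, npoints):
    # Regularly spaced 1D sample of npoints values between xmin and xmax.
    step = (xmax - xmin) / (npoints - 1)
    return ot.Sample([[xmin + i * step] for i in range(npoints)])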
Example #4
def DrawUnivariateSampleDistribution(sample, distribution):
    """
    Draw a unidimensional sample and its distribution.
    
    Parameters
    ----------
    sample : ot.Sample
        A dimension 1 sample.
    distribution : ot.Distribution
        A dimension 1 distribution.

    Returns
    -------
    graph : ot.Graph
        The PDF plot and the sample on the X axis.
    """
    if sample.getDimension() != 1:
        raise ValueError("Expected a sample of dimension 1, but dimension is %d" % (sample.getDimension()))
    if distribution.getDimension() != 1:
        raise ValueError("Expected a distribution of dimension 1, but dimension is %d" % (distribution.getDimension()))
    graph = distribution.drawPDF()
    # Add points on X axis
    sample_size = sample.getSize()
    data = ot.Sample(sample_size, 2)
    data[:, 0] = sample
    cloud = ot.Cloud(data)
    graph.add(cloud)
    return graph
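A hypothetical usage of DrawUnivariateSampleDistribution with a 1D Normal sample:
import openturns as ot
normal = ot.Normal(0.0, 1.0)
sample = normal.getSample(30)
graph = DrawUnivariateSampleDistribution(sample, normal)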
Example #5
def drawLevelSetContour2D(distribution,
                          numberOfPointsInXAxis,
                          alpha,
                          threshold,
                          sampleSize=500):
    '''
    Draw a sample of the distribution together with the contour line of its
    PDF at the density value `threshold`, which corresponds to the minimum
    volume LevelSet of probability content alpha.
    '''
    sample = distribution.getSample(sampleSize)
    X1min = sample[:, 0].getMin()[0]
    X1max = sample[:, 0].getMax()[0]
    X2min = sample[:, 1].getMin()[0]
    X2max = sample[:, 1].getMax()[0]
    xx = ot.Box([numberOfPointsInXAxis], ot.Interval([X1min],
                                                     [X1max])).generate()
    yy = ot.Box([numberOfPointsInXAxis], ot.Interval([X2min],
                                                     [X2max])).generate()
    xy = ot.Box([numberOfPointsInXAxis, numberOfPointsInXAxis],
                ot.Interval([X1min, X2min], [X1max, X2max])).generate()
    data = distribution.computePDF(xy)
    graph = ot.Graph('', 'X1', 'X2', True, 'topright')
    labels = ["%.2f%%" % (100 * alpha)]
    contour = ot.Contour(xx, yy, data, [threshold], labels)
    contour.setColor('black')
    graph.setTitle("%.2f%% of the distribution, sample size = %d" %
                   (100 * alpha, sampleSize))
    graph.add(contour)
    cloud = ot.Cloud(sample)
    graph.add(cloud)
    return graph
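The density value threshold is an input of the function above; a plausible way to obtain it, together with the minimum volume level set of probability alpha, is computeMinimumVolumeLevelSetWithThreshold (sketch with assumed values):
import openturns as ot
distribution = ot.Normal(2)
alpha = 0.9
levelSet, threshold = distribution.computeMinimumVolumeLevelSetWithThreshold(alpha)
graph = drawLevelSetContour2D(distribution, 50, alpha, threshold)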
Example #6
def drawBidimensionalSample(sample, title):
    n = sample.getSize()
    graph = ot.Graph("%s, size=%d" % (title, n), r"$X_1$", r"$X_2$", True, '')
    #cloud = ot.Cloud(sample)
    cloud = ot.Cloud(sample, "blue", "fsquare", "")
    graph.add(cloud)
    return graph
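Hypothetical usage, drawing a bivariate standard Normal sample:
import openturns as ot
graph = drawBidimensionalSample(ot.Normal(2).getSample(200), "Standard Normal")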
Example #7
    def _inliers_outliers(self, sample=None, inliers=True):
        """Inliers or outliers cloud drawing.

        :param sample: Sample of size (n_samples, n_dims).
        :type sample: :class:`openturns.Sample`
        :param bool inliers: Whether to draw inliers or outliers.
        :return: OpenTURNS Cloud or Pair object if :attr:`dim` > 2.
        :rtype: :class:`openturns.Cloud` or :class:`openturns.Pairs`
        """
        if inliers:
            idx = self.computeOutlierIndices(False)
            legend = "Inliers at alpha=%.4f" % (self.outlierAlpha)
            marker_color = 'blue'
        else:
            idx = self.computeOutlierIndices()
            legend = "Outliers at alpha=%.4f" % (self.outlierAlpha)
            marker_color = 'red'

        if sample is None:
            sample = np.array(self.sample)
        else:
            sample = np.asarray(sample)

        sample = sample[idx, :]

        if sample.size == 0:
            return

        if sample.shape[1] == 2:
            cloud = ot.Cloud(sample, marker_color, self.data_marker, legend)
        else:
            cloud = ot.Pairs(sample, '', self.sample.getDescription(),
                             marker_color, self.data_marker)

        return cloud
Example #8
def plotMyBasicKriging(krigResult, xMin, xMax, X, Y, level=0.95):
    '''
    Given a kriging result, plot the data, the kriging metamodel 
    and a confidence interval.
    '''
    samplesize = X.getSize()
    meta = krigResult.getMetaModel()
    graphKriging = meta.draw(xMin, xMax)
    graphKriging.setLegends(["Kriging"])
    # Create a grid of points and evaluate the function and the kriging
    nbpoints = 50
    xGrid = linearSample(xMin,xMax,nbpoints)
    yFunction = g(xGrid)
    yKrig = meta(xGrid)
    # Compute the conditional covariance
    epsilon = ot.Point(nbpoints,1.e-8)
    conditionalVariance = krigResult.getConditionalMarginalVariance(xGrid)+epsilon
    conditionalVarianceSample = ot.Sample([[cv] for cv in conditionalVariance])
    conditionalSigma = sqrt(conditionalVarianceSample)
    # Compute the quantile of the Normal distribution
    alpha = 1-(1-level)/2
    quantileAlpha = ot.DistFunc.qNormal(alpha)
    # Graphics of the bounds
    epsilon = 1.e-8
    dataLower = [yKrig[i,0] - quantileAlpha * conditionalSigma[i,0] for i in range(nbpoints)]
    dataUpper = [yKrig[i,0] + quantileAlpha * conditionalSigma[i,0] for i in range(nbpoints)]
    # Coordinates of the vertices of the Polygons
    vLow = [[xGrid[i,0],dataLower[i]] for i in range(nbpoints)] 
    vUp = [[xGrid[i,0],dataUpper[i]] for i in range(nbpoints)]
    # Compute the Polygon graphics
    boundsPoly = plot_kriging_bounds(vLow,vUp,nbpoints)
    boundsPoly.setLegend("%.0f%% bounds" % (100 * level))
    # Validate the kriging metamodel
    mmv = ot.MetaModelValidation(xGrid, yFunction, meta)
    Q2 = mmv.computePredictivityFactor()[0]
    # Plot the function
    graphFonction = ot.Curve(xGrid,yFunction)
    graphFonction.setLineStyle("dashed")
    graphFonction.setColor("magenta")
    graphFonction.setLineWidth(2)
    graphFonction.setLegend("Function")
    # Draw the X and Y observed
    cloudDOE = ot.Cloud(X, Y)
    cloudDOE.setPointStyle("circle")
    cloudDOE.setColor("red")
    cloudDOE.setLegend("Data")
    # Assemble the graphics
    graph = ot.Graph()
    graph.add(boundsPoly)
    graph.add(graphFonction)
    graph.add(cloudDOE)
    graph.add(graphKriging)
    graph.setLegendPosition("bottomright")
    graph.setAxes(True)
    graph.setGrid(True)
    graph.setTitle("Size = %d, Q2=%.2f%%" % (samplesize,100*Q2))
    graph.setXTitle("X")
    graph.setYTitle("Y")
    return graph
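The helper plot_kriging_bounds used above is not shown; a plausible sketch, assuming the confidence band is drawn as a single closed Polygon built from the lower and upper vertices:
import openturns as ot

def plot_kriging_bounds(vLow, vUp, n_test):
    # Join the lower bound and the reversed upper bound to close the polygon.
    vertices = vLow + vUp[::-1]
    boundsPoly = ot.Polygon(vertices)
    boundsPoly.setColor("lightblue")
    return boundsPoly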
Example #9
 def _drawObservationsVsInputs1Dimension(self, inputObservations,
                                         outputObservations, outputAtPrior,
                                         outputAtPosterior):
     """
     Plots the observed output of the model depending 
     on the observed input before and after calibration.
     Can manage only 1D samples.
     """
     xDim = inputObservations.getDimension()
     if (xDim != 1):
         raise TypeError('Input observations are not 1D.')
     yDim = outputObservations.getDimension()
     xdescription = inputObservations.getDescription()
     ydescription = outputObservations.getDescription()
     graph = ot.Graph("", xdescription[0], ydescription[0], True,
                      "topright")
     # Observations
     if (yDim == 1):
         cloud = ot.Cloud(inputObservations, outputObservations)
         cloud.setColor(self.observationColor)
         cloud.setLegend("Observations")
         graph.add(cloud)
     else:
         raise TypeError('Output observations are not 1D.')
     # Model outputs before calibration
     yPriorDim = outputAtPrior.getDimension()
     if (yPriorDim == 1):
         cloud = ot.Cloud(inputObservations, outputAtPrior)
         cloud.setColor(self.priorColor)
         cloud.setLegend("Prior")
         graph.add(cloud)
     else:
         raise TypeError('Output prior predictions are not 1D.')
     # Model outputs after calibration
     yPosteriorDim = outputAtPosterior.getDimension()
     if (yPosteriorDim == 1):
         cloud = ot.Cloud(inputObservations, outputAtPosterior)
         cloud.setColor(self.posteriorColor)
         cloud.setLegend("Posterior")
         graph.add(cloud)
     else:
         raise TypeError('Output posterior predictions are not 1D.')
     return graph
Example #10
def plotXvsY(sampleX, sampleY, figsize=(15, 3)):
    dimX = sampleX.getDimension()
    inputdescr = sampleX.getDescription()
    fig = pl.figure(figsize=figsize)
    for i in range(dimX):
        ax = fig.add_subplot(1, dimX, i + 1)
        graph = ot.Graph('', inputdescr[i], 'Y', True, '')
        cloud = ot.Cloud(sampleX[:, i], sampleY)
        graph.add(cloud)
        _ = ot.viewer.View(graph, figure=fig, axes=[ax])
    return fig
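Hypothetical usage of plotXvsY, assuming the module-level imports the function relies on (openturns as ot, openturns.viewer, and matplotlib pylab as pl):
import openturns as ot
import openturns.viewer
from matplotlib import pylab as pl
X = ot.Normal(3).getSample(100)
Y = ot.SymbolicFunction(["x1", "x2", "x3"], ["x1 + 2*x2 - x3"])(X)
fig = plotXvsY(X, Y)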
Example #11
def plot_exact_model():
    graph = ot.Graph('', 'x', '', True, '')
    y_test = model(x_test)
    curveModel = ot.Curve(x_test, y_test)
    curveModel.setLineStyle("solid")
    curveModel.setColor("black")
    graph.add(curveModel)
    cloud = ot.Cloud(Xtrain, Ytrain)
    cloud.setColor("black")
    cloud.setPointStyle("fsquare")
    graph.add(cloud)
    return graph
Example #12
def drawLevelSet1D(distribution, levelSet, alpha, threshold, sampleSize = 100):
    '''
    Draw a 1D sample included in a given levelSet.
    The sample is generated from the distribution.
    '''
    inLevelSample = computeSampleInLevelSet(distribution,levelSet,sampleSize)
    cloudSample = from1Dto2Dsample(inLevelSample)
    graph = distribution.drawPDF()
    mycloud = ot.Cloud(cloudSample)
    graph.add(mycloud)
    graph.setTitle("%.2f%% of the distribution, sample size = %d" % (100 * alpha, sampleSize))
    return graph
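The helpers computeSampleInLevelSet and from1Dto2Dsample are not shown; a plausible sketch of both, assuming rejection of the points outside the level set and a zero ordinate for plotting:
import openturns as ot

def computeSampleInLevelSet(distribution, levelSet, sampleSize=100):
    # Keep only the realizations that belong to the level set (rejection sampling).
    sample = distribution.getSample(sampleSize)
    return ot.Sample([x for x in sample if levelSet.contains(x)])

def from1Dto2Dsample(sample):
    # Put the 1D values on the X axis with a zero ordinate.
    return ot.Sample([[x[0], 0.0] for x in sample])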
Example #13
 def _drawObservationsVsPredictions1Dimension(self, outputObservations,
                                              outputAtPrior,
                                              outputAtPosterior):
     """
     Plots the output of the model depending 
     on the output observations before and after calibration.
     Can manage only 1D samples.
     """
     yDim = outputObservations.getDimension()
     ydescription = outputObservations.getDescription()
     xlabel = "%s Observations" % (ydescription[0])
     ylabel = "%s Predictions" % (ydescription[0])
     graph = ot.Graph("", xlabel, ylabel, True, "topleft")
     # Plot the diagonal
     if (yDim == 1):
         curve = ot.Curve(outputObservations, outputObservations)
         curve.setColor(self.observationColor)
         graph.add(curve)
     else:
         raise TypeError('Output observations are not 1D.')
     # Plot the predictions before
     yPriorDim = outputAtPrior.getDimension()
     if (yPriorDim == 1):
         cloud = ot.Cloud(outputObservations, outputAtPrior)
         cloud.setColor(self.priorColor)
         cloud.setLegend("Prior")
         graph.add(cloud)
     else:
         raise TypeError('Output prior predictions are not 1D.')
     # Plot the predictions after
     yPosteriorDim = outputAtPosterior.getDimension()
     if (yPosteriorDim == 1):
         cloud = ot.Cloud(outputObservations, outputAtPosterior)
         cloud.setColor(self.posteriorColor)
         cloud.setLegend("Posterior")
         graph.add(cloud)
     else:
         raise TypeError('Output posterior predictions are not 1D.')
     return graph
Example #14
def drawIFS(f_i,
            skip=100,
            iterations=1000,
            batch_size=1,
            name="IFS",
            color="blue"):
    # Any set of initial points should work in theory
    initialPoints = ot.Normal(2).getSample(batch_size)
    # Compute the contraction factor of each function
    all_r = [m.sqrt(abs(f[1].computeDeterminant())) for f in f_i]
    # Find the box counting dimension, i.e. the value s such that r_1^s+...+r_n^s-1=0
    equation = "-1.0"
    for r in all_r:
        equation += "+" + str(r) + "^s"
    dim = len(f_i)
    s = ot.Brent().solve(ot.SymbolicFunction("s", equation), 0.0, 0.0,
                         -m.log(dim) / m.log(max(all_r)))
    # Add a small perturbation to sample even the degenerated transforms
    probabilities = [r**s + 1e-2 for r in all_r]
    # Build the sampling distribution
    support = [[i] for i in range(dim)]
    choice = ot.UserDefined(support, probabilities)
    currentPoints = initialPoints
    points = ot.Sample(0, 2)
    # Convert the f_i into LinearEvaluation to benefit from the evaluation over
    # a Sample
    phi_i = [ot.LinearEvaluation([0.0] * 2, f[0], f[1]) for f in f_i]
    # Burn-in phase
    for i in range(skip):
        index = int(round(choice.getRealization()[0]))
        currentPoints = phi_i[index](currentPoints)
    # Iteration phase
    for i in range(iterations):
        index = int(round(choice.getRealization()[0]))
        currentPoints = phi_i[index](currentPoints)
        points.add(currentPoints)
    # Draw the IFS
    graph = ot.Graph()
    graph.setTitle(name)
    graph.setXTitle("x")
    graph.setYTitle("y")
    graph.setGrid(True)
    cloud = ot.Cloud(points)
    cloud.setColor(color)
    cloud.setPointStyle("dot")
    graph.add(cloud)
    return graph, s
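Hypothetical usage of drawIFS: a Sierpinski-like IFS with three affine contractions, each entry of f_i being a (translation, linear part) pair as read by f[0] and f[1] above (assumes the module-level imports of the snippet, openturns as ot and math as m):
import openturns as ot
half = ot.SquareMatrix([[0.5, 0.0], [0.0, 0.5]])
smaller = ot.SquareMatrix([[0.4, 0.0], [0.0, 0.4]])
f_i = [([0.0, 0.0], half), ([1.0, 0.0], half), ([0.5, 1.0], smaller)]
graph, s = drawIFS(f_i, skip=100, iterations=5000, name="Sierpinski-like IFS")
print("estimated box-counting dimension:", s)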
Example #15
    def build(self, dataX, dataY):
        logLikelihood = ot.Function(ReducedLogLikelihood(dataX, dataY))
        xlb = np.linspace(self.lambdaMin_, self.lambdaMax_, num=500)
        lambdax = [logLikelihood([x])[0] for x in xlb]
        # Maximize the reduced log-likelihood over [lambdaMin_, lambdaMax_]
        problem = ot.OptimizationProblem(logLikelihood)
        problem.setMinimization(False)
        problem.setBounds(ot.Interval(self.lambdaMin_, self.lambdaMax_))
        algo = ot.TNC(problem)
        algo.setStartingPoint([xlb[np.array(lambdax).argmax()]])
        algo.run()
        optimalLambda = algo.getResult().getOptimalPoint()[0]

        # graph
        optimalLogLikelihood = algo.getResult().getOptimalValue()[0]
        graph = logLikelihood.draw(0.01 * optimalLambda, 10.0 * optimalLambda)
        c = ot.Cloud([[optimalLambda, optimalLogLikelihood]])
        c.setColor("red")
        c.setPointStyle("circle")
        graph.add(c)
        return ot.BoxCoxTransform([optimalLambda]), graph
Example #16
    def _inliers_outliers(self, sample, inliers=True):
        """Inliers or outliers cloud drawing."""
        # Perform selection
        if inliers:
            idx = self.inlier_indices
            legend = "Inliers at alpha=%.4f" % (self.outlierAlpha)
            marker_color = self.inlier_color
        else:
            idx = self.outlier_indices
            legend = "Outliers at alpha=%.4f" % (self.outlierAlpha)
            marker_color = self.outlier_color

        sample_selection = sample[idx, :]

        if sample_selection.getSize() == 0:
            return

        cloud = ot.Cloud(sample_selection, marker_color, self.data_marker,
                         legend)
        return cloud
Example #17
def drawKL(scaledKL, KLev, mesh, title="Scaled KL modes"):
    graph_modes = scaledKL.drawMarginal()
    graph_modes.setTitle(title + " scaled KL modes")
    graph_modes.setXTitle('$x$')
    graph_modes.setYTitle(r'$\sqrt{\lambda_i}\phi_i$')
    data_ev = [[i, KLev[i]] for i in range(scaledKL.getSize())]
    graph_ev = ot.Graph()
    graph_ev.add(ot.Curve(data_ev))
    graph_ev.add(ot.Cloud(data_ev))
    graph_ev.setTitle(title + " KL eigenvalues")
    graph_ev.setXTitle('$k$')
    graph_ev.setYTitle(r'$\lambda_i$')
    graph_ev.setAxes(True)
    graph_ev.setGrid(True)
    graph_ev.setLogScale(2)
    bb = graph_ev.getBoundingBox()
    lower = bb.getLowerBound()
    lower[1] = 1.0e-7
    bb = ot.Interval(lower, bb.getUpperBound())
    graph_ev.setBoundingBox(bb)
    return graph_modes, graph_ev
Example #18
# %%
sigmaObservationNoiseH = 0.1 # (m)
noiseH = ot.Normal(0.,sigmaObservationNoiseH)
sampleNoiseH = noiseH.getSample(nbobs)
Hobs = outputH + sampleNoiseH

# %%
# Plot the Y observations versus the X observations.

# %%
Qobs = inputSample[:,0]

# %%
graph = ot.Graph("Observations","Q (m3/s)","H (m)",True)
cloud = ot.Cloud(Qobs,Hobs)
graph.add(cloud)
view = viewer.View(graph)

# %%
# Setting the calibration parameters
# ----------------------------------

# %%
# Define the reference values of the :math:`\theta` parameter. In the Bayesian framework, this is called the mean of the *prior* normal distribution. In the data assimilation framework, this is called the *background*.

# %%
KsInitial = 20.
ZvInitial = 49.
ZmInitial = 51.
thetaPrior = [KsInitial, ZvInitial, ZmInitial]
Example #19
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
ot.RandomGenerator.SetSeed(0)
factory = ot.GeometricFactory()
ref = factory.build()
dimension = ref.getDimension()
if dimension <= 2:
    sample = ref.getSample(50)
    distribution = factory.build(sample)
    if dimension == 1:
        distribution.setDescription(['$t$'])
        pdf_graph = distribution.drawPDF(256)
        cloud = ot.Cloud(sample, ot.Sample(sample.getSize(), 1))
        cloud.setColor('blue')
        cloud.setPointStyle('fcircle')
        pdf_graph.add(cloud)
        fig = plt.figure(figsize=(10, 4))
        plt.suptitle(str(distribution))
        pdf_axis = fig.add_subplot(111)
        View(pdf_graph, figure=fig, axes=[pdf_axis], add_legend=False)
    else:
        sample = ref.getSample(500)
        distribution.setDescription(['$t_0$', '$t_1$'])
        pdf_graph = distribution.drawPDF([256] * 2)
        cloud = ot.Cloud(sample)
        cloud.setColor('red')
        cloud.setPointStyle('fcircle')
        pdf_graph.add(cloud)
        fig = plt.figure(figsize=(10, 4))
        plt.suptitle(str(distribution))
Example #20
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
size = 6
ot.RandomGenerator.SetSeed(3)

# Data
x1 = ot.Uniform(1.0, 9.0).getSample(size)
y1 = ot.Uniform(0.0, 120.0).getSample(size)
graph = ot.Graph("Non significant Spearman coefficient", "u", "v", True, "")
cloud1 = ot.Cloud(x1, y1)
cloud1.setPointStyle("diamond")
cloud1.setColor("orange")
cloud1.setLineWidth(2)
graph.add(cloud1)

size = 150 - size

# Data
x2 = ot.Uniform(1.0, 9.0).getSample(size)
y2 = ot.Uniform(0.0, 120.0).getSample(size)
# Merge with previous data
x = ot.Sample(x1)
y = ot.Sample(y1)
x.add(x2)
y.add(y2)
# Quadratic model
algo = ot.QuadraticLeastSquares(x, y)
algo.run()
quadratic = algo.getMetaModel()
Example #21
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt
ot.Log.Show(ot.Log.NONE)

# %%
# Create 2-d samples to visualize
N = 500
R = ot.CorrelationMatrix(2)
R[0, 1] = -0.7
# 2d N(1,1) with correlation
sample1 = ot.Normal([1.0] * 2, [1.0] * 2, R).getSample(N)
sample2 = ot.Normal(2).getSample(N)  # 2d N(0,1) independent

# %%
# Create cloud drawables
cloud1 = ot.Cloud(sample1, 'blue', 'fsquare', 'First Cloud')
cloud2 = ot.Cloud(sample2, 'red', 'fsquare', 'Second Cloud')

# Then, assemble it into a graph
myGraph2d = ot.Graph('2d clouds', 'x1', 'x2', True, 'topright')
myGraph2d.add(cloud1)
myGraph2d.add(cloud2)
view = viewer.View(myGraph2d)

# %%
# Create a 3-d sample
mean = [0.0] * 3
sigma = [2.0, 1.5, 1.0]
R = ot.CorrelationMatrix(3)
R[0, 1] = 0.8
R[1, 2] = -0.5
Example #22
import openturns as ot
from openturns.viewer import View

f = ot.SymbolicFunction(['x'], ['17-exp(0.1*(x-1.0))'])
graph = f.draw(0.0, 12.0)

dist = ot.Normal([5.0, 15.0], [1.0, 0.25], ot.IdentityMatrix(2))
N = 1000
sample = dist.getSample(N)
sample1 = ot.Sample(0, 2)
sample2 = ot.Sample(0, 2)
for X in sample:
    x, y = X
    if f([x])[0] > y:
        sample1.add(X)
    else:
        sample2.add(X)

cloud = ot.Cloud(sample1)
cloud.setColor('green')
cloud.setPointStyle('square')
graph.add(cloud)

cloud = ot.Cloud(sample2)
cloud.setColor('red')
cloud.setPointStyle('square')
graph.add(cloud)

graph.setTitle('Monte Carlo simulation (Pf=0.048, N=1000)')
graph.setLegends(['domain Df', 'simulations'])
graph.setLegendPosition('topright')
View(graph)
Example #23
# %%
# Scale the design proportionally to the standard deviation of each component.

# %%
covariance = rv.getCovariance()
scaling = [m.sqrt(covariance[i, i]) for i in range(dim)]
print('scaling=', scaling)
sample *= scaling

# %%
# Center the design around the mean point of the distribution.

# %%
center = rv.getMean()
print('center=', center)
sample += center

# %%
# Draw the design as well as the distribution iso-values.

# %%
graph = distribution.drawPDF()
doe = ot.Cloud(sample)
doe.setColor('red')
doe.setLegend('design')
graph.add(doe)
view = viewer.View(graph)
plt.show()
Example #24
# %%
size = 64
distribution = ot.ComposedDistribution(
    [ot.Uniform(lowerbound[0], upperbound[0])] * dim)
experiment = ot.LowDiscrepancyExperiment(
    ot.SobolSequence(), distribution, size)
solver = ot.MultiStart(ot.Cobyla(problem), experiment.generate())

# %%
# Visualize the starting points of the optimization algorithm
# -----------------------------------------------------------

# %%
startingPoints = solver.getStartingSample()
graph = rastrigin.draw(lowerbound, upperbound, [100]*dim)
graph.setTitle("Rastrigin function")
cloud = ot.Cloud(startingPoints)
cloud.setPointStyle("bullet")
cloud.setColor("black")
graph.add(cloud)
graph.setLegends([""])
# sphinx_gallery_thumbnail_number = 2
view = viewer.View(graph)

# %%
# We see that the starting points are well spread across the input domain of the function.

# %%
# Solve the optimization problem
# ------------------------------

# %%
Example #25
degrees = range(5, 12)
q2 = ot.Sample(len(degrees), 2)
for maximumDegree in degrees:
    ot.ResourceMap.SetAsUnsignedInteger("FunctionalChaosAlgorithm-MaximumTotalDegree",maximumDegree)
    print("Maximum total degree =", maximumDegree)
    algo = ot.FunctionalChaosAlgorithm(inputSample, outputSample)
    algo.run()
    result = algo.getResult()
    metamodel = result.getMetaModel()
    for outputIndex in range(2):
        val = ot.MetaModelValidation(inputTest, outputTest[:,outputIndex], metamodel.getMarginal(outputIndex))
        q2[maximumDegree - degrees[0], outputIndex] = val.computePredictivityFactor()[0]

# %%
graph = ot.Graph("Predictivity","Total degree","Q2",True)
cloud = ot.Cloud([[d] for d in degrees], q2[:,0])
cloud.setLegend("Output #0")
cloud.setPointStyle("bullet")
graph.add(cloud)
cloud = ot.Cloud([[d] for d in degrees], q2[:,1])
cloud.setLegend("Output #1")
cloud.setColor("red")
cloud.setPointStyle("bullet")
graph.add(cloud)
graph.setLegendPosition("topright")
view = viewer.View(graph)
plt.show()

# %%
# We see that a total degree lower than 9 is not sufficient to describe the first output with good predictivity. However, the coefficient of predictivity drops when the total degree gets greater than 12. 
# The predictivity of the second output seems to be much less satisfactory: a little more work would be required to improve the metamodel. 
Example #26
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
ot.RandomGenerator.SetSeed(0)
factory = ot.TriangularFactory()
ref = factory.build()
dimension = ref.getDimension()
if dimension <= 2:
    sample = ref.getSample(50)
    distribution = factory.build(sample)
    if dimension == 1:
        distribution.setDescription(['$t$'])
        pdf_graph = distribution.drawPDF(256)
        cloud = ot.Cloud(sample, ot.Sample(sample.getSize(), 1))
        cloud.setColor('blue')
        cloud.setPointStyle('fcircle')
        pdf_graph.add(cloud)
        fig = plt.figure(figsize=(10, 4))
        plt.suptitle(str(distribution))
        pdf_axis = fig.add_subplot(111)
        View(pdf_graph, figure=fig, axes=[pdf_axis], add_legend=False)
    else:
        sample = ref.getSample(500)
        distribution.setDescription(['$t_0$', '$t_1$'])
        pdf_graph = distribution.drawPDF([256]*2)
        cloud = ot.Cloud(sample)
        cloud.setColor('red')
        cloud.setPointStyle('fcircle')
        pdf_graph.add(cloud)
        fig = plt.figure(figsize=(10, 4))
        plt.suptitle(str(distribution))
Example #27
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View

f = ot.SymbolicFunction(['x'], ['sin(x)'])
a = -2.5
b = 4.5
algo = ot.Fejer1([20])
value, nodes = algo.integrateWithNodes(f, ot.Interval(a, b))

g = f.draw(a, b, 512)
lower = ot.Cloud(nodes, ot.Sample(nodes.getSize(), 1))
lower.setColor("magenta")
lower.setPointStyle('circle')
g.add(lower)
g.setTitle(r"GaussLegendre example: $\int_{-5/2}^{9/2}\sin(t)\,dt=$" +
           str(value[0]))

fig = plt.figure(figsize=(8, 4))
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
View(g, figure=fig, axes=[axis], add_legend=False)
Example #28
import openturns as ot
from openturns.viewer import View

N = 20
ot.RandomGenerator.SetSeed(10)
x = ot.Uniform(0.0, 10.0).getSample(N)
f = ot.SymbolicFunction(['x'], ['5*x+10'])
y = f(x) + ot.Normal(0.0, 5.0).getSample(N)
graph = f.draw(0.0, 10.0)
graph.setTitle(
    '... because the rank transformation turns any monotonic trend\ninto a linear relation for which Pearson\'s correlation is relevant'
)
graph.setXTitle('u')
graph.setYTitle('v')
cloud = ot.Cloud(x, y)
cloud.setPointStyle('circle')
cloud.setColor('orange')
graph.add(cloud)
View(graph)
Example #29
delta = xexact - xoptim
absoluteError = delta.norm()
absoluteError

# %%
# We see that the algorithm found an accurate approximation of the solution.

# %%
result.getOptimalValue()  # f(x*)

# %%
result.getEvaluationNumber()

# %%
graph = rosenbrock.draw(lowerbound, upperbound, [100] * 2)
cloud = ot.Cloud(ot.Sample([x0, xoptim]))
cloud.setColor("black")
cloud.setPointStyle("bullet")
graph.add(cloud)
graph.setTitle("Rosenbrock function")
view = viewer.View(graph)

# %%
# We see that the algorithm had to start from the top left of the banana and go to the top right.

# %%
graph = result.drawOptimalValueHistory()
view = viewer.View(graph)

# %%
# The function value history makes the path of the algorithm clear. In the first step, the algorithm went into the valley, which made the function value decrease rapidly. Once there, the algorithm had to follow the bottom of the valley, so the function decreased, but slowly. In the final steps, the algorithm found the neighbourhood of the minimum, so that local convergence could take place.
Example #30
ref_func_with_error = ot.SymbolicFunction(['x', 'eps'], ['x * sin(x) + eps'])
ref_func = ot.ParametricFunction(ref_func_with_error, [1], [0.0])
x = np.vstack(np.linspace(xmin, xmax, n_pt))
ot.RandomGenerator.SetSeed(1235)
eps = ot.Normal(0, 1.5).getSample(n_pt)
X = ot.Sample(n_pt, 2)
X[:, 0] = x
X[:, 1] = eps
if with_error:
    y = np.array(ref_func_with_error(X))
else:
    y = np.array(ref_func(x))

# %%
graph = ref_func.draw(xmin, xmax, 200)
cloud = ot.Cloud(x, y)
cloud.setColor('red')
cloud.setPointStyle('bullet')
graph.add(cloud)
graph.setLegends(["Function", "Data"])
graph.setLegendPosition("topleft")
graph.setTitle("Sample size = %d" % (n_pt))
view = viewer.View(graph)

# %%
# Create the kriging algorithm
# ----------------------------

# 1. basis
ot.ResourceMap.SetAsBool(
    'GeneralLinearModelAlgorithm-UseAnalyticalAmplitudeEstimate', True)