Example no. 1
def getMetaModelValidation(self, sample_in_validation, sample_out_validation):
    assert self.__kriging_metamodel__ is not None, "Please run the computation first"
    assert len(sample_in_validation) == len(sample_out_validation)
    self._check_clean_nans(sample_in_validation, sample_out_validation)
    self.validation_results.clear()
    # Assumes Sequence and Iterable are imported from collections.abc.
    if isinstance(self.__kriging_metamodel__, (Sequence, Iterable, list)):
        # One metamodel per output marginal: validate each one separately.
        for i, model in enumerate(self.__kriging_metamodel__):
            validation = ot.MetaModelValidation(sample_in_validation,
                                                sample_out_validation[:, i],
                                                model)
            R2 = validation.computePredictivityFactor()
            residual = validation.getResidualSample()
            graph = validation.drawValidation()
            self.validation_results.addGraph(graph)
            self.validation_results.addR2(R2)
            self.validation_results.addResidual(residual)
    else:
        validation = ot.MetaModelValidation(sample_in_validation,
                                            sample_out_validation[:, 0],
                                            self.__kriging_metamodel__)
        R2 = validation.computePredictivityFactor()
        residual = validation.getResidualSample()
        graph = validation.drawValidation()
        self.validation_results.addGraph(graph)
        self.validation_results.addR2(R2)
        self.validation_results.addResidual(residual)
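The method above assumes a `validation_results` object providing `clear`, `addGraph`, `addR2`, and `addResidual`, which is not shown in this extract. A minimal sketch of such a container (hypothetical; the real class may differ):

class ValidationResults:
    """Minimal holder for per-output validation artifacts (a sketch)."""

    def __init__(self):
        self.graphs = []
        self.r2s = []
        self.residuals = []

    def clear(self):
        self.graphs.clear()
        self.r2s.clear()
        self.residuals.clear()

    def addGraph(self, graph):
        self.graphs.append(graph)

    def addR2(self, r2):
        self.r2s.append(r2)

    def addResidual(self, residual):
        self.residuals.append(residual)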
def computeQ2Chaos(chaosResult, inputTest, outputTest):
    """Compute the Q2 of a chaos."""
    metamodel = chaosResult.getMetaModel()
    val = ot.MetaModelValidation(inputTest, outputTest, metamodel)
    Q2 = val.computePredictivityFactor()[0]
    Q2 = max(Q2, 0.0)  # Q2 can be negative for a very poor metamodel; we are not lucky every day.
    return Q2
def draw_polynomial_chaos_validation(polynomialchaos_result,
                                     g_function,
                                     input_distribution,
                                     n_valid=1000):
    """
    Validate the polynomial chaos.

    Create the validation plot.


    Parameters
    ----------
    polynomialChaosResult : ot.FunctionalChaosResult
        The polynomial chaos expansion.
    g_function : ot.Function
        The function.
    input_distribution : ot.Distribution
        The input distribution.
    n_valid : int
        The number of simulations to compute the Q2 score.

    Returns
    -------
    Q2 : float
        The Q2 score
    """
    metamodel = polynomialchaos_result.getMetaModel()
    inputTest = input_distribution.getSample(n_valid)
    outputTest = g_function(inputTest)
    val = ot.MetaModelValidation(inputTest, outputTest, metamodel)
    Q2 = val.computePredictivityFactor()[0]
    graph = val.drawValidation()
    graph.setTitle("Q2=%.2f%%" % (Q2 * 100))
    view = otv.View(graph, figure_kw={"figsize": (5.0, 4.0)})
    return view
def compute_polynomial_chaos_Q2(polynomialchaos_result,
                                g_function,
                                input_distribution,
                                n_valid=1000):
    """
    Compute the Q2 score of the polynomial chaos.


    Parameters
    ----------
    polynomialChaosResult : ot.FunctionalChaosResult
        The polynomial chaos expansion.
    g_function : ot.Function
        The function.
    input_distribution : ot.Distribution
        The input distribution.
    n_valid : int
        The number of simulations to compute the Q2 score.

    Returns
    -------
    Q2 : float
        The Q2 score
    """
    ot.RandomGenerator.SetSeed(1976)
    metamodel = polynomialchaos_result.getMetaModel()
    inputTest = input_distribution.getSample(n_valid)
    outputTest = g_function(inputTest)
    val = ot.MetaModelValidation(inputTest, outputTest, metamodel)
    Q2 = val.computePredictivityFactor()[0]
    return Q2
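A minimal usage sketch for the two helpers above, assuming a chaos result `chaos_result`, a model `g`, and an input distribution `distribution` from the surrounding study (hypothetical names):

# Hypothetical objects from the surrounding study.
Q2 = compute_polynomial_chaos_Q2(chaos_result, g, distribution, n_valid=1000)
print("Q2 = %.4f" % Q2)
view = draw_polynomial_chaos_validation(chaos_result, g, distribution)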
def plotMyBasicKriging(krigResult, xMin, xMax, X, Y, level=0.95):
    '''
    Given a kriging result, plot the data, the kriging metamodel
    and a confidence interval.
    '''
    samplesize = X.getSize()
    meta = krigResult.getMetaModel()
    graphKriging = meta.draw(xMin, xMax)
    graphKriging.setLegends(["Kriging"])
    # Create a grid of points and evaluate the function and the kriging
    nbpoints = 50
    xGrid = linearSample(xMin, xMax, nbpoints)
    yFunction = g(xGrid)
    yKrig = meta(xGrid)
    # Compute the conditional variance, regularized to avoid negative values
    epsilon = ot.Point(nbpoints, 1.0e-8)
    conditionalVariance = krigResult.getConditionalMarginalVariance(xGrid) + epsilon
    conditionalVarianceSample = ot.Sample([[cv] for cv in conditionalVariance])
    conditionalSigma = sqrt(conditionalVarianceSample)  # assumes an element-wise sqrt, e.g. numpy's
    # Compute the quantile of the Normal distribution
    alpha = 1 - (1 - level) / 2
    quantileAlpha = ot.DistFunc.qNormal(alpha)
    # Graphics of the bounds
    dataLower = [yKrig[i, 0] - quantileAlpha * conditionalSigma[i, 0] for i in range(nbpoints)]
    dataUpper = [yKrig[i, 0] + quantileAlpha * conditionalSigma[i, 0] for i in range(nbpoints)]
    # Coordinates of the vertices of the Polygons
    vLow = [[xGrid[i, 0], dataLower[i]] for i in range(nbpoints)]
    vUp = [[xGrid[i, 0], dataUpper[i]] for i in range(nbpoints)]
    # Compute the Polygon graphics
    boundsPoly = plot_kriging_bounds(vLow, vUp, nbpoints)
    boundsPoly.setLegend("%.0f%% bounds" % (level * 100))
    # Validate the kriging metamodel
    mmv = ot.MetaModelValidation(xGrid, yFunction, meta)
    Q2 = mmv.computePredictivityFactor()[0]
    # Plot the function
    graphFonction = ot.Curve(xGrid, yFunction)
    graphFonction.setLineStyle("dashed")
    graphFonction.setColor("magenta")
    graphFonction.setLineWidth(2)
    graphFonction.setLegend("Function")
    # Draw the observed X and Y
    cloudDOE = ot.Cloud(X, Y)
    cloudDOE.setPointStyle("circle")
    cloudDOE.setColor("red")
    cloudDOE.setLegend("Data")
    # Assemble the graphics
    graph = ot.Graph()
    graph.add(boundsPoly)
    graph.add(graphFonction)
    graph.add(cloudDOE)
    graph.add(graphKriging)
    graph.setLegendPosition("bottomright")
    graph.setAxes(True)
    graph.setGrid(True)
    graph.setTitle("Size = %d, Q2=%.2f%%" % (samplesize, 100 * Q2))
    graph.setXTitle("X")
    graph.setYTitle("Y")
    return graph
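The function above relies on the helpers `linearSample` and `plot_kriging_bounds` (and on the model `g`), which are not shown in this extract. A minimal sketch of what they could look like, assuming a one-dimensional regular grid and a filled confidence band:

def linearSample(xmin, xmax, npoints):
    """Return a Sample on a regular grid from xmin to xmax (a sketch)."""
    step = (xmax - xmin) / (npoints - 1)
    rg = ot.RegularGrid(xmin, step, npoints)
    return rg.getVertices()


def plot_kriging_bounds(vLow, vUp, n_points):
    """Build a filled PolygonArray band from lower/upper vertex lists (a sketch)."""
    palette = ot.Drawable.BuildDefaultPalette(2)
    color = palette[1]
    polygonList = [
        ot.Polygon([vLow[i], vLow[i + 1], vUp[i + 1], vUp[i]], color, color)
        for i in range(n_points - 1)
    ]
    return ot.PolygonArray(polygonList)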
Example no. 6
def drawMetaModelValidation(X_test, Y_test, krigingMetamodel, title):
    val = ot.MetaModelValidation(X_test, Y_test, krigingMetamodel)
    Q2 = val.computePredictivityFactor()[0]
    graph = val.drawValidation().getGraph(0, 0)
    graph.setLegends([""])
    graph.setLegends(["%s, Q2 = %.2f%%" % (title, 100*Q2), ""])
    graph.setLegendPosition("topleft")
    return graph
def printChaosStats(multivariateBasis, chaosResult, inputTest, outputTest,
                    totalDegree):
    """Print statistics of a chaos."""
    sparsityRate = computeSparsityRate(multivariateBasis, totalDegree,
                                       chaosResult)
    Q2 = computeQ2Chaos(chaosResult, inputTest, outputTest)
    metamodel = chaosResult.getMetaModel()
    val = ot.MetaModelValidation(inputTest, outputTest, metamodel)
    graph = val.drawValidation().getGraph(0, 0)
    legend1 = "D=%d, Q2=%.2f%%" % (totalDegree, 100 * Q2)
    graph.setLegends(["", legend1])
    graph.setLegendPosition("topleft")
    print("Degree=%d, Q2=%.2f%%, Sparsity=%.2f%%" %
          (totalDegree, 100 * Q2, 100 * sparsityRate))
    return graph
Example no. 8
# Validate the metamodel
# ----------------------

# %%
# We finally want to validate the Kriging metamodel. To do so, we generate a validation sample of size 100 and evaluate the output of the model on it.

# %%
sampleSize_test = 100
X_test = myDistribution.getSample(sampleSize_test)
Y_test = model(X_test)

# %%
# The `MetaModelValidation` class makes the validation easy. To create it, we use the validation samples and the metamodel.

# %%
val = ot.MetaModelValidation(X_test, Y_test, krigingMetamodel)

# %%
# The `computePredictivityFactor` method computes the Q2 factor.

# %%
Q2 = val.computePredictivityFactor()[0]
print(Q2)

# %%
# The residuals are the differences between the model outputs and the metamodel predictions.

# %%
r = val.getResidualSample()
graph = ot.HistogramFactory().build(r).drawPDF()
graph.setXTitle("Residuals (cm)")
Example no. 9
# Validate the metamodel
# ----------------------

# %%
# Generate a validation sample (which is independent of the training sample).

# %%
n_valid = 1000
inputTest = myDistribution.getSample(n_valid)
outputTest = g(inputTest)

# %%
# The `MetaModelValidation` class validates the metamodel based on a validation sample.

# %%
val = ot.MetaModelValidation(inputTest, outputTest, metamodel)

# %%
# Compute the :math:`Q^2` predictivity coefficient.

# %%
Q2 = val.computePredictivityFactor()[0]
Q2

# %%
# Plot the observed versus the predicted outputs.

# %%
graph = val.drawValidation()
graph.setTitle("Q2=%.2f%%" % (Q2 * 100))
view = viewer.View(graph)

# %%
# We see that the metamodel fits the model approximately well, except perhaps for extreme values of :math:`x_2`. However, there is a better way of globally validating the metamodel: using `MetaModelValidation` on a validation design of experiments.

# %%
n_valid = 100
inputTest = distribution.getSample(n_valid)
outputTest = model(inputTest)


# %%
# Plot the corresponding validation graphics.

# %%
val = ot.MetaModelValidation(inputTest, outputTest, metamodel)
Q2 = val.computePredictivityFactor()
graph = val.drawValidation()
graph.setTitle("Metamodel validation Q2="+str(Q2))
view = viewer.View(graph)

# %%
# The coefficient of predictivity is not extremely satisfactory for the first output, but it would be sufficient for a central dispersion study.
# The second output has a much more satisfactory Q2: only one single extreme point is far from the diagonal of the graphics.

# %%
# Compute and print Sobol' indices
# --------------------------------

# %%
chaosSI = ot.FunctionalChaosSobolIndices(result) 
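
# %%
# The extract stops here; a minimal sketch of how the first-order and total
# Sobol' indices could then be printed. `getSobolIndex` and
# `getSobolTotalIndex` are the OpenTURNS accessors; the loop bound assumes
# the input `distribution` used above.
for i in range(distribution.getDimension()):
    print("Input %d: S = %.4f, ST = %.4f"
          % (i, chaosSI.getSobolIndex(i), chaosSI.getSobolTotalIndex(i)))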
Example no. 11
legend = ax.legend()
ax.autoscale()

# %%
# Validation
# ----------

# %%
n_valid = 10
x_valid = ot.Uniform(xmin, xmax).getSample(n_valid)
if with_error:
    X_valid = ot.Sample(x_valid)
    X_valid.stack(ot.Normal(0.0, 1.5).getSample(n_valid))
    y_valid = np.array(ref_func_with_error(X_valid))
else:
    # Without the error column, evaluate the reference function on x_valid directly.
    y_valid = np.array(ref_func(x_valid))

# %%
validation = ot.MetaModelValidation(x_valid, y_valid, krigingMeta)
validation.computePredictivityFactor()

# %%
graph = validation.drawValidation()
view = viewer.View(graph)

# %%
graph = validation.getResidualDistribution().drawPDF()
graph.setXTitle("Residuals")
view = viewer.View(graph)
plt.show()
Example no. 12
# Validate the metamodel
# ----------------------

# %%
# We finally want to validate the kriging metamodel. To do so, we generate a validation sample of size 200 and evaluate the output of the model on it.

# %%
sampleSize_test = 200
X_test = myDistribution.getSample(sampleSize_test)
Y_test = model(X_test)

# %%
# The `MetaModelValidation` class makes the validation easy. To create it, we use the validation samples and the metamodel.

# %%
val = ot.MetaModelValidation(X_test, Y_test, metamodel)

# %%
# The `computePredictivityFactor` method computes the Q2 factor.

# %%
Q2 = val.computePredictivityFactor()[0]
Q2

# %%
# Since the Q2 is larger than 95%, we can say that the quality is acceptable.

# %%
# The residuals are the differences between the model outputs and the metamodel predictions.

# %%
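# The extract ends here; a minimal sketch of the residual histogram cell,
# mirroring Example no. 8 above:
r = val.getResidualSample()
graph = ot.HistogramFactory().build(r).drawPDF()
graph.setXTitle("Residuals")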
Example no. 13
validationKL = ot.KarhunenLoeveValidation(outputFMUTestSample, resultKL)
graph = validationKL.computeResidualMean().draw()
ot.Show(graph)

# %%
# As the epidemiological model considers a population size of 700, the residual
# mean error on the field is acceptable.

# %%
# We validate the Kriging (using the Karhunen-Loeve coefficients of the test
# sample):

projectFunction = ot.KarhunenLoeveProjection(resultKL)
coefficientSample = projectFunction(outputFMUTestSample)

validationKriging = ot.MetaModelValidation(inputTestSample, coefficientSample,
                                           metamodel)
Q2 = validationKriging.computePredictivityFactor()[0]
print(Q2)

# %%
# The predictivity factor is very close to 1, which is satisfying.
# Further statistical tests exist in
# `OpenTURNS <http://openturns.github.io/openturns/master/contents.html>`_ to
# assert the quality of the obtained metamodel.

# %%
#
# ----------------------
#
# In this script, we have created and validated the ``globalMetamodel``. This
# metamodel (computationally faster than the FMU) can now be employed instead
# of the FMU.
# Projection strategy
projectionStrategy = ot.LeastSquaresStrategy(
    inputSample, outputSample, leastSquaresFactory)

algo = ot.FunctionalChaosAlgorithm(
    inputSample, outputSample, distribution, adaptiveStrategy, projectionStrategy)
# Reinitialize the RandomGenerator to see the effect of the sampling
# method only
ot.RandomGenerator.SetSeed(0)
algo.run()

# Get the results
result = algo.getResult()

# MetaModelValidation - SPC
metaModelValidationSPC = ot.MetaModelValidation(
    inputValidation, outputValidation, result.getMetaModel())
print("")
print("Sparse chaos scoring")
print(
    "Q2 = ", round(metaModelValidationSPC.computePredictivityFactor()[0], 5))
print("Residual sample = ", repr(
    metaModelValidationSPC.getResidualSample()))

# 2) Kriging algorithm
# KrigingAlgorithm
basis = ot.QuadraticBasisFactory(dimension).build()
# model already computed, separately
covarianceModel = ot.GeneralizedExponential(
    [3.52, 2.15, 2.99], [11.41], 2.0)
algo2 = ot.KrigingAlgorithm(
    inputSample, outputSample, covarianceModel, basis)
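
# The extract stops here; a sketch of the remaining steps, mirroring the
# sparse chaos scoring above: run the kriging algorithm, then validate its
# metamodel on the same validation samples.
algo2.run()
result2 = algo2.getResult()
metaModelValidationKriging = ot.MetaModelValidation(
    inputValidation, outputValidation, result2.getMetaModel())
print("")
print("Kriging scoring")
print("Q2 = ", round(metaModelValidationKriging.computePredictivityFactor()[0], 5))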
Example no. 15
_ = View(graph)

# %%
# Inspect the chaos quality: residuals and relative errors.
# The relative error is very low; that means the chaos decomposition performs very well.
print(f"residuals={result.getFCEResult().getResiduals()}")
print(f"relative errors={result.getFCEResult().getRelativeErrors()}")

# %%
# Graphically validate the chaos result:
# we can see that the points are very close to the diagonal; this means
# the approximated points are very close to the learning points.
modes = result.getModesSample()
metamodel = result.getFCEResult().getMetaModel()
output = result.getOutputSample()
validation = ot.MetaModelValidation(modes, output, metamodel)
q2 = validation.computePredictivityFactor()
print(f"q2={q2}")
graph = validation.drawValidation()
graph.setTitle(f'Chaos validation - q2={q2}')
_ = View(graph)

# %%
# Perform an evaluation on a new realization and ensure the output
# is close to the evaluation with the reference function
metamodel = result.getFieldToPointMetamodel()
x0 = X.getRealization()
y0 = f(x0)
y0hat = metamodel(x0)
print(f'y0={y0} y0^={y0hat}')
X = ot.ComposedDistribution([dist_E, dist_F, dist_L, dist_I])

g = ot.SymbolicFunction(["E", "F", "L", "I"], ["F* L^3 /  (3 * E * I)"])
g.setOutputDescription(["Y (cm)"])

# To make the best use of the simulations, we equip
# the function with a history mechanism.
g = ot.MemoizeFunction(g)


# Finally, we define the output random vector.

XRV = ot.RandomVector(X)
Y = ot.CompositeRandomVector(g, XRV)
Y.setDescription(["Y (cm)"])

# ## Linear regression with LinearLeastSquares

n = 1000
sampleX = X.getSample(n)
sampleY = g(sampleX)

myLeastSquares = ot.LinearLeastSquares(sampleX, sampleY)
myLeastSquares.run()
responseSurface = myLeastSquares.getMetaModel()

val = ot.MetaModelValidation(sampleX, sampleY, responseSurface)

graph = val.drawValidation()
view = otv.View(graph)
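
# A possible follow-up, mirroring the other examples above: extract the Q2
# predictivity factor of the linear response surface from the validation.
Q2 = val.computePredictivityFactor()[0]
print("Q2 = %.4f" % Q2)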