import openturns as ot
import openturns.testing as ott


def test_two_inputs_one_output():
    # Kriging use case
    inputDimension = 2

    # Learning data
    levels = [8, 5]
    box = ot.Box(levels)
    inputSample = box.generate()
    # Scale each direction
    inputSample *= 10.0

    model = ot.SymbolicFunction(['x', 'y'], ['cos(0.5*x) + sin(y)'])
    outputSample = model(inputSample)

    # Validation
    sampleSize = 10
    inputValidSample = ot.ComposedDistribution(
        2 * [ot.Uniform(0, 10.0)]).getSample(sampleSize)
    outputValidSample = model(inputValidSample)

    # 2) Definition of the squared exponential covariance model
    # The scale and amplitude parameters below were calibrated beforehand
    # with TNC optimization
    scales = [5.33532, 2.61534]
    amplitude = [1.61536]
    covarianceModel = ot.SquaredExponential(scales, amplitude)

    # 3) Basis definition
    basis = ot.ConstantBasisFactory(inputDimension).build()

    # 4) Kriging algorithm
    algo = ot.KrigingAlgorithm(inputSample, outputSample, covarianceModel,
                               basis)
    algo.run()

    result = algo.getResult()
    # Get meta model
    metaModel = result.getMetaModel()
    outData = metaModel(inputValidSample)

    # 5) Errors
    # Interpolation
    ott.assert_almost_equal(outputSample, metaModel(inputSample), 3.0e-5,
                            3.0e-5)

    # 6) Kriging variance is 0 on learning points
    covariance = result.getConditionalCovariance(inputSample)
    ott.assert_almost_equal(covariance, ot.SquareMatrix(len(inputSample)),
                            7e-7, 7e-7)

    # Covariance per marginal & extract variance component
    coll = result.getConditionalMarginalCovariance(inputSample)
    var = [mat[0, 0] for mat in coll]
    ott.assert_almost_equal(var, [0] * len(var), 0.0, 1e-13)

    # Variance per marginal
    var = result.getConditionalMarginalVariance(inputSample)
    ott.assert_almost_equal(var, ot.Point(len(inputSample)), 0.0, 1e-13)
    # Estimation
    ott.assert_almost_equal(outputValidSample, outData, 1.e-1, 1e-1)
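
    # Optional cross-check (sketch): recent OpenTURNS versions bundle this
    # validation step in MetaModelValidation; the constructor below assumes
    # the ~1.19-era API, which changed in later releases.
    val = ot.MetaModelValidation(inputValidSample, outputValidSample, metaModel)
    print('Q2 =', val.computePredictivityFactor())  # close to 1 for a good fit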
Example #2
    def _buildKrigingAlgo(self, inputSample, outputSample):
        """
        Build the kriging algorithm without running it.
        """
        if self._basis is None:
            # create linear basis only for the defect parameter (1st parameter),
            # constant otherwise
            input = ['x' + str(i) for i in range(self._dim)]
            functions = []
            # constant
            functions.append(ot.SymbolicFunction(input, ['1']))
            # linear for the first parameter only
            functions.append(ot.SymbolicFunction(input, [input[0]]))
            self._basis = ot.Basis(functions)

        if self._covarianceModel is None:
            # anisotropic squared exponential covariance model
            self._covarianceModel = ot.SquaredExponential([1] * self._dim)

        # normalization
        mean = inputSample.computeMean()
        try:
            stddev = inputSample.computeStandardDeviation()
        except AttributeError:
            stddev = inputSample.computeStandardDeviationPerComponent()
        linear = ot.SquareMatrix(self._dim)
        for j in range(self._dim):
            linear[j, j] = 1.0 / stddev[j] if abs(stddev[j]) > 1e-12 else 1.0
        zero = [0.0] * self._dim
        transformation = ot.LinearFunction(mean, zero, linear)

        algoKriging = ot.KrigingAlgorithm(transformation(inputSample),
                                          outputSample, self._covarianceModel,
                                          self._basis)
        return algoKriging, transformation
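
# The standardization built above can be exercised on its own; a minimal sketch
# with an illustrative sample (assumes OpenTURNS >= 1.17, where
# Sample.computeStandardDeviation returns the per-component standard deviation):
dim = 2
demoSample = ot.Normal([10.0] * dim, [5.0] * dim, ot.CorrelationMatrix(dim)).getSample(100)
demoMean = demoSample.computeMean()
demoStd = demoSample.computeStandardDeviation()
demoLinear = ot.SquareMatrix(dim)
for j in range(dim):
    demoLinear[j, j] = 1.0 / demoStd[j]
demoTransformation = ot.LinearFunction(demoMean, [0.0] * dim, demoLinear)
print(demoTransformation(demoSample).computeMean())  # approximately [0, 0]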
Example #3
def computeDurbinWatsonTest(x, residuals, hypothesis="Equal"):
    # Parameters:
    # hypothesis : string
    #    "Equal" : tests whether the autocorrelation is 0
    #    "Less" : tests whether the autocorrelation is less than 0
    #    "Greater" : tests whether the autocorrelation is greater than 0
    nx = x.getSize()
    dim = x.getDimension()
    residuals = np.array(residuals)
    # statistic Durbin Watson
    dw = np.sum(np.diff(np.hstack(residuals))**2) / np.sum(residuals**2)

    # Normal approximation of DW to compute the p-value
    X = ot.Matrix(nx, dim + 1)
    X[:, 0] = np.ones((nx, 1))
    X[:, 1] = x
    B = ot.Matrix(nx, dim + 1)
    B[0, 1] = x[0][0] - x[1][0]
    B[nx - 1, 1] = x[nx - 1][0] - x[nx - 2][0]
    for i in range(nx - 2):
        B[i + 1, 1] = -x[i][0] + 2 * x[i + 1][0] - x[i + 2][0]

    XtX = X.computeGram()
    XBQt = ot.SquareMatrix(XtX.solveLinearSystem(B.transpose() * X))
    P = 2 * (nx - 1) - XBQt.computeTrace()
    XBTrace = ot.SquareMatrix(XtX.solveLinearSystem(B.computeGram(),
                                                    False)).computeTrace()
    Q = 2 * (3 * nx - 4) - 2 * XBTrace + ot.SquareMatrix(
        XBQt * XBQt).computeTrace()
    dmean = P / (nx - (dim + 1))
    dvar = 2.0 / ((nx - (dim + 1)) * (nx - (dim + 1) + 2)) * (Q - P * dmean)

    # compute the pvalue with respect to hypothesis
    # Default pvalue is for hypothesis == "Equal"
    # complementary CDF of standard normal distribution
    pValue = 2 * ot.DistFunc.pNormal(np.abs(dw - dmean) / np.sqrt(dvar), True)
    if hypothesis == "Less":
        pValue = 1 - pValue / 2
    elif hypothesis == "Greater":
        pValue = pValue / 2

    return pValue
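

# Quick smoke test of the function above (a sketch: the data are synthetic and
# numpy as np / openturns as ot are assumed to be imported):
ot.RandomGenerator.SetSeed(0)
x = ot.Normal().getSample(100)
residuals = np.array(ot.Normal(0.0, 0.1).getSample(100))
print('p-value (autocorrelation == 0):', computeDurbinWatsonTest(x, residuals))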
    def __init__(self, p, mesh):
        # input dimension 1: dimension of the input field values
        # output dimension 1: dimension of the output field values
        # the input and output fields share the same mesh
        super(ConvolutionP1, self).__init__(mesh, 1, mesh, 1)
        # Here we define some constants and we set up the invariant part of the execution
        self.setInputDescription(["x"])
        self.setOutputDescription(["y"])
        vertices = mesh.getVertices()
        size = vertices.getSize()
        self.mat_W_ = ot.SquareMatrix(size)
        for i in range(size):
            x_minus_t = (vertices - vertices[i]) * (-1.0)
            values_w = p(x_minus_t)
            for j in range(size):
                self.mat_W_[i, j] = values_w[j, 0]
        G = mesh.computeP1Gram()
        self.mat_W_ = self.mat_W_ * G
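
    # The extract stops before the matching _exec; a minimal sketch, assuming
    # the class derives from ot.OpenTURNSPythonFieldFunction and that the
    # output field values are simply the weight matrix applied to the inputs:
    def _exec(self, X):
        values = ot.Point([x[0] for x in X])
        result = self.mat_W_ * values
        return [[v] for v in result]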
#
# The library models such a chain with the *DiscreteMarkovChain* object, defined by the origin :math:`X_{t_0}` (which can be either deterministic or uncertain), the transition matrix :math:`\mathcal{M}` and the time grid.

# %%
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt
ot.Log.Show(ot.Log.NONE)

# %%
# Define the origin
origin = ot.Dirac(0.0)

# %%
# Define the transition matrix
transition = ot.SquareMatrix([[0.1, 0.3, 0.6], [0.7, 0.1, 0.2],
                              [0.5, 0.3, 0.2]])

# %%
# Define a 1-d mesh
tgrid = ot.RegularGrid(0.0, 1.0, 50)

# %%
# Markov chain definition and realization
process = ot.DiscreteMarkovChain(origin, transition, tgrid)
real = process.getRealization()
graph = real.drawMarginal(0)
graph.setTitle('Discrete Markov chain')
view = viewer.View(graph)

# %%
# Get several realizations (a sketch; the original snippet is cut here)
sample = process.getSample(5)
#!/usr/bin/env python

import openturns as ot
import os

ot.TESTPREAMBLE()

process = ot.DiscreteMarkovChain()
print("Default constructor : process = ")
print(process)

process = ot.DiscreteMarkovChain(0, ot.SquareMatrix([[1.0, 0.0], [0.0, 1.0]]))
print("Constructor from int and SquareMatrix: process = ")
print(process)

transitionMatrix = ot.SquareMatrix([[0.0, 0.5, 0.5], [0.7, 0.0, 0.3],
                                    [0.8, 0.0, 0.2]])
print("transition matrix =")
print(transitionMatrix)

origin = 1
print("origin =")
print(origin)

process.setTransitionMatrix(transitionMatrix)
print("Transition matrix accessor : process = ")
print(process)

process.setOrigin(origin)
print("Origin accessor : process = ")
print(process)
# check Description typemap
sample.setDescription(('x0', 'x1', 'x2'))
print(sample.getDescription())
sample.setDescription(('y0', 'y1', 'y2'))
print(sample.getDescription())
sample.setDescription(np.array(('z0', 'z1', 'z2')))
print(sample.getDescription())

# Check Matrix tuple constructor
t0 = (1., 2., 3., 4.)

m0 = ot.Matrix(2, 2, t0)
print("tuple", t0, "=> Matrix", m0)

m0 = ot.SquareMatrix(2, t0)
print("tuple", t0, "=> SquareMatrix", m0)

m0 = ot.SymmetricMatrix(2, t0)
print("tuple", t0, "=> SymmetricMatrix", m0)

m0 = ot.Tensor(2, 2, 1, t0)
print("tuple", t0, "=> Tensor", m0)

m0 = ot.SymmetricTensor(2, 1, t0)
print("tuple", t0, "=> SymmetricTensor", m0)

m0 = ot.CorrelationMatrix(2, t0)
print("tuple", t0, "=> CorrelationMatrix", m0)

m0 = ot.CovarianceMatrix(2, t0)
print("tuple", t0, "=> CovarianceMatrix", m0)
# try to generate a sample
sampleSize = 1000
sample = sampler.getSample(sampleSize)

x_mu = sample.computeMean()
x_sigma = sample.computeStandardDeviation()

# compute covariance
x_cov = sample.computeCovariance()
P = ot.Matrix(obsSize, chainDim)
for i in range(obsSize):
    for j in range(chainDim):
        P[i, j] = p[i, j]
Qn = P.transpose() * P + Q0
# invert Qn column by column: solving Qn * col_j = e_j yields column j of the inverse
Qn_inv = ot.SquareMatrix(chainDim)
for j in range(chainDim):
    I_j = [0] * chainDim
    I_j[j] = 1.0
    Qn_inv_j = Qn.solveLinearSystem(I_j)
    for i in range(chainDim):
        Qn_inv[i, j] = Qn_inv_j[i]

# component standard deviations: square roots of the diagonal of Qn_inv
sigma_exp = [0] * chainDim
for i in range(chainDim):
    sigma_exp[i] = m.sqrt(Qn_inv[i, i])
y_vec = [0] * obsSize
for i in range(obsSize):
    y_vec[i] = y_obs[i, 0]

x_emp = Qn.solveLinearSystem(P.transpose() * y_vec)
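
# The solve above is the regularized normal equation (P'P + Q0) x = P'y;
# quick consistency check (sketch):
residual = Qn * x_emp - P.transpose() * y_vec
print('normal-equation residual norm:', residual.norm())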
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
if ot.WhiteNoise().__class__.__name__ == 'Process':
    # default to Gaussian for the interface class
    process = ot.GaussianProcess()
elif ot.WhiteNoise().__class__.__name__ == 'DiscreteMarkovChain':
    process = ot.WhiteNoise()
    process.setTransitionMatrix(ot.SquareMatrix([[0.0,0.5,0.5],[0.7,0.0,0.3],[0.8,0.0,0.2]]))
    origin = 0
    process.setOrigin(origin)
else:
    process = ot.WhiteNoise()
process.setTimeGrid(ot.RegularGrid(0.0, 0.02, 50))
process.setDescription(['$x$'])
sample = process.getSample(6)
sample_graph = sample.drawMarginal(0)
sample_graph.setTitle(str(process))

fig = plt.figure(figsize=(10, 4))
sample_axis = fig.add_subplot(111)
View(sample_graph, figure=fig, axes=[sample_axis], add_legend=False)
Example #10
t0 = 0.0  # start of the time grid (not defined in this extract)
dt = 0.5
N = int((20.0 - t0) / dt)
mesh = ot.RegularGrid(t0, dt, N)

# Create the covariance function


def gamma(tau):
    return 1.0 / (1.0 + tau * tau)


# Create the collection of SquareMatrix
coll = ot.SquareMatrixCollection()
for k in range(N):
    t = mesh.getValue(k)
    matrix = ot.SquareMatrix([[gamma(t)]])
    coll.add(matrix)

# %%
# Create the covariance model
covmodel = ot.UserDefinedStationaryCovarianceModel(mesh, coll)

# One vertex of the mesh
tau = 1.5

# Get the covariance function computed at the vertex tau
covmodel(tau)

# %%
# Graph of the covariance function
x = ot.Sample(N, 2)
#! /usr/bin/env python

import openturns as ot

ref_values = [[1.0, 0.0], [0.0, 0.5]]

mats = [ot.Matrix(ref_values),
        ot.SquareMatrix(ref_values),
        ot.TriangularMatrix(ref_values),
        ot.SymmetricMatrix(ref_values),
        ot.CovarianceMatrix(ref_values),
        ot.CorrelationMatrix(ref_values)]
mats.extend([
    ot.ComplexMatrix(ref_values),
    ot.HermitianMatrix(ref_values),
    ot.TriangularComplexMatrix(ref_values),
    ot.SquareComplexMatrix(ref_values)])

for a in mats:

    # try conversion
    ref = ot.Matrix([[1.0, 0.0], [0.0, 0.5]])
    iname = a.__class__.__name__
    print('a=', a)

    # try scalar mul
    try:
        s = 5.
        ats = a * s
        print('a*s=', ats)
        sta = s * a
        print('s*a=', sta)
    except Exception as exc:
        print('scalar mul failed for', iname, ':', exc)
def test_model(myModel, test_partial_grad=True, x1=None, x2=None):

    inputDimension = myModel.getInputDimension()
    dimension = myModel.getOutputDimension()

    if x1 is None and x2 is None:
        x1 = ot.Point(inputDimension)
        x2 = ot.Point(inputDimension)
        for j in range(inputDimension):
            x1[j] = -1.0 - j
            x2[j] = 3.0 + 2.0 * j
    else:
        x1 = ot.Point(x1)
        x2 = ot.Point(x2)

    if myModel.isStationary():
        ott.assert_almost_equal(myModel(x1 - x2), myModel(x1, x2), 1e-14,
                                1e-14)
        ott.assert_almost_equal(myModel(x2 - x1), myModel(x1, x2), 1e-14,
                                1e-14)

    eps = 1e-3

    mesh = ot.IntervalMesher([9] * inputDimension).build(
        ot.Interval([-10] * inputDimension, [10] * inputDimension))

    C = myModel.discretize(mesh)
    if dimension == 1:
        # Check that discretize & computeAsScalar provide the
        # same values
        vertices = mesh.getVertices()
        for j in range(len(vertices)):
            for i in range(j, len(vertices)):
                ott.assert_almost_equal(
                    C[i, j], myModel.computeAsScalar(vertices[i], vertices[j]),
                    1e-14, 1e-14)
    else:
        # Check that discretize & operator() provide the
        # same values
        vertices = mesh.getVertices()
        localMatrix = ot.SquareMatrix(dimension)
        for j in range(len(vertices)):
            for i in range(j, len(vertices)):
                for localJ in range(dimension):
                    for localI in range(dimension):
                        localMatrix[localI, localJ] = C[i * dimension + localI,
                                                        j * dimension + localJ]
                ott.assert_almost_equal(localMatrix,
                                        myModel(vertices[i], vertices[j]),
                                        1e-14, 1e-14)

    if test_partial_grad:
        grad = myModel.partialGradient(x1, x2)

        if (dimension == 1):
            gradfd = ot.Matrix(inputDimension, 1)
            for j in range(inputDimension):
                x1_g = ot.Point(x1)
                x1_d = ot.Point(x1)
                x1_g[j] = x1_d[j] + eps
                x1_d[j] = x1_d[j] - eps
                gradfd[j, 0] = (myModel.computeAsScalar(x1_g, x2) -
                                myModel.computeAsScalar(x1_d, x2)) / (2 * eps)
        else:
            gradfd = ot.Matrix(inputDimension, dimension * dimension)
            covarianceX1X2 = myModel(x1, x2)
            centralValue = ot.Point(covarianceX1X2.getImplementation())
            # Loop over the shifted points
            for i in range(inputDimension):
                currentPoint = ot.Point(x1)
                currentPoint[i] += eps
                localCovariance = myModel(currentPoint, x2)
                currentValue = ot.Point(localCovariance.getImplementation())
                for j in range(currentValue.getSize()):
                    gradfd[i, j] = (currentValue[j] - centralValue[j]) / eps

        ott.assert_almost_equal(grad, gradfd, 1e-5, 1e-5,
                                "in " + myModel.getClassName() + " grad")

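# A sketch of how the test_model harness above might be driven (assumes
# import openturns as ot and import openturns.testing as ott; the model
# choice is illustrative):
myModel = ot.SquaredExponential([2.0], [1.5])
test_model(myModel)
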
def C(tau):
    return 1.0 / (1.0 + tau * tau)


t0 = 0.0
t1 = 20.0
N = 40
dt = (t1 - t0) / (N - 1)
myMesh = ot.RegularGrid(t0, dt, N)

myCovarianceCollection = ot.SquareMatrixCollection()
for k in range(N):
    t = myMesh.getValue(k)
    matrix = ot.SquareMatrix(1)
    matrix[0, 0] = C(t)
    myCovarianceCollection.add(matrix)

covarianceModel = ot.UserDefinedStationaryCovarianceModel(
    myMesh, myCovarianceCollection)


def f(tau):
    return [covarianceModel(tau)[0, 0]]


func = ot.PythonFunction(1, 1, f)
func.setDescription(['$t$', '$cov$'])
cov_graph = func.draw(0.0, 20.0, 512)
cov_graph.setTitle('User defined stationary covariance model')
p = 2
q = 1
dim = 2

# Make a realization of an ARMA model
# Tmin , Tmax and N points for TimeGrid
dt = 1.0
size = 400
timeGrid = ot.RegularGrid(0.0, dt, size)

# white noise
cov = ot.CovarianceMatrix([[0.1, 0.0], [0.0, 0.2]])
whiteNoise = ot.WhiteNoise(ot.Normal([0.0] * dim, cov), timeGrid)

# AR/MA coefficients
ar = ot.ARMACoefficients(p, dim)
ar[0] = ot.SquareMatrix([[-0.5, -0.1], [-0.4, -0.5]])
ar[1] = ot.SquareMatrix([[0.0, 0.0], [-0.25, 0.0]])

ma = ot.ARMACoefficients(q, dim)
ma[0] = ot.SquareMatrix([[-0.4, 0.0], [0.0, -0.4]])

# ARMA model creation
myARMA = ot.ARMA(ar, ma, whiteNoise)

# Create a realization
timeSeries = ot.TimeSeries(myARMA.getRealization())

cov[0, 0] += 0.01 * ot.DistFunc.rNormal()
cov[1, 1] += 0.01 * ot.DistFunc.rNormal()

alpha = ot.SquareMatrix(dim)
Example #15
simplices = [[0, 1, 2, 4],
             [3, 5, 6, 7],
             [1, 2, 3, 6],
             [1, 2, 4, 6],
             [1, 3, 5, 6],
             [1, 4, 5, 6]]

mesh3D = ot.Mesh(vertices, simplices)
tree = ot.KDTree(vertices)
print("3D mesh=", mesh3D)
print("volume=", "%.3f" % mesh3D.getVolume())
print("simplices volume=", mesh3D.computeSimplicesVolume())
point = [1.8] * 3
print("Nearest index(", point, ")=", tree.query(point))
points = [[-0.25] * 3, [2.25] * 3]
print("Nearest index(", points, ")=", tree.query(points))
print("P1 gram=\n", mesh3D.computeP1Gram())
rotation = ot.SquareMatrix(3)
rotation[0, 0] = m.cos(m.pi / 3.0)
rotation[0, 1] = m.sin(m.pi / 3.0)
rotation[1, 0] = -m.sin(m.pi / 3.0)
rotation[1, 1] = m.cos(m.pi / 3.0)
rotation[2, 2] = 1.0
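
# Sanity check (sketch): the rotation matrix is orthogonal, so it preserves norms
v = ot.Point([1.0, 0.0, 0.0])
print('rotated v=', rotation * v)
print('norm=', (rotation * v).norm())  # expect 1.0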

# isRegular bug check on a large regular grid
time_grid = ot.RegularGrid(0.0, 0.2, 40963)
mesh = ot.Mesh(time_grid)
print(mesh.isRegular())
Example #16
    print('Distribution Parameters ', distParam)

    non_native = distParam.getValues()
    desc = distParam.getDescription()
    print('non-native=', non_native, desc)
    native = distParam.evaluate()
    print('native=', native)
    non_native = distParam.inverse(native)
    print('non-native=', non_native)
    print('built dist=', distParam.getDistribution())

    # derivative of the native parameters with respect to the parameters of
    # the distribution
    print(distParam.gradient())

    # by the finite difference technique
    eps = 1e-5
    dim = len(non_native)
    nativeParamGrad = ot.SquareMatrix(ot.IdentityMatrix(dim))

    for i in range(dim):
        for j in range(dim):
            xp = list(non_native)
            xp[i] += eps
            xm = list(non_native)
            xm[i] -= eps
            nativeParamGrad[i, j] = 0.5 * \
                (distParam(xp)[j] - distParam(xm)[j]) / eps

    print(nativeParamGrad)
Example #17
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
if ot.SpectralGaussianProcess().__class__.__name__ == 'Process':
    # default to Gaussian for the interface class
    process = ot.GaussianProcess()
elif ot.SpectralGaussianProcess().__class__.__name__ == 'DiscreteMarkovChain':
    process = ot.SpectralGaussianProcess()
    process.setTransitionMatrix(
        ot.SquareMatrix([[0.0, 0.5, 0.5], [0.7, 0.0, 0.3], [0.8, 0.0, 0.2]]))
    origin = 0
    process.setOrigin(origin)
else:
    process = ot.SpectralGaussianProcess()
process.setTimeGrid(ot.RegularGrid(0.0, 0.02, 50))
process.setDescription(['$x$'])
sample = process.getSample(6)
sample_graph = sample.drawMarginal(0)
sample_graph.setTitle(str(process))

fig = plt.figure(figsize=(10, 4))
sample_axis = fig.add_subplot(111)
View(sample_graph, figure=fig, axes=[sample_axis], add_legend=False)
Example #18
# Operator +|-
summation = ot.Sample(sample1 + sample2)
subtraction = ot.Sample(sample2 - sample1)
print('sample1 + sample2=', repr(summation))
print('sample2 - sample1=', repr(subtraction))

# Operator +=|-=
sample3 = ot.Sample(sample2)
sample4 = ot.Sample(sample2)
sample3 += sample1
sample4 -= sample1
print('sample3=', repr(sample3))
print('sample4=', repr(sample4))

sample5 = ot.Sample(sample2)
m = ot.SquareMatrix([[1, 2], [3, 5]])
v = ot.Point(2, 3.0)
t = ot.Point(2, 5.0)

print('sample5 =', sample5)

print('sample*2:', sample5 * 2.)
print('2*sample:', 2.0 * sample5)
print('sample/2:', sample5 / 2.)

print('sample*v:', sample5 * v)
print('sample/v:', sample5 / v)

# in-place
sample5 += t
print('sample+=t:', sample5)
Example #19
    sampleCollection[continuousDistributionNumber +
                     i] = discreteSampleCollection[i]

factoryCollection = ot.DistributionFactoryCollection(3)
factoryCollection[0] = ot.UniformFactory()
factoryCollection[1] = ot.BetaFactory()
factoryCollection[2] = ot.NormalFactory()
aSample = ot.Uniform(-1.5, 2.5).getSample(size)
model, best_bic = ot.FittingTest.BestModelBIC(aSample, factoryCollection)
print("best model BIC=", repr(model))
model, best_result = ot.FittingTest.BestModelKolmogorov(
    aSample, factoryCollection)
print("best model Kolmogorov=", repr(model))

# BIC goodness-of-fit comparison
resultBIC = ot.SquareMatrix(distributionNumber)
for i in range(distributionNumber):
    for j in range(distributionNumber):
        value = ot.FittingTest.BIC(sampleCollection[i],
                                   distributionCollection[j], 0)
        resultBIC[i, j] = value
print("resultBIC=", repr(resultBIC))

# Kolmogorov test : case with estimated parameters
print("Kolmogorov test : case with estimated parameters")
distribution = ot.Normal()
sample = distribution.getSample(30)
factory = ot.NormalFactory()
ot.ResourceMap.SetAsUnsignedInteger('FittingTest-KolmogorovSamplingSize',
                                    10000)
fitted_dist, test_result = ot.FittingTest.Kolmogorov(sample, factory)
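
# The returned TestResult can be queried directly (sketch):
print('p-value=%.4g' % test_result.getPValue())
print('accepted=', test_result.getBinaryQualityMeasure())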
Example #20
    graph.setGrid(True)
    cloud = ot.Cloud(points)
    cloud.setColor(color)
    cloud.setPointStyle("dot")
    graph.add(cloud)
    return graph, s


# %%
# **Definition of some IFS**

# %%
# Spiral
rho1 = 0.9
theta1 = 137.5 * m.pi / 180.0
f1 = [[0.0]*2, ot.SquareMatrix(2, [rho1 * m.cos(theta1), -rho1 * m.sin(theta1), \
                                   rho1 * m.sin(theta1),  rho1 * m.cos(theta1)])]

rho2 = 0.15
f2 = [[1.0, 0.0], rho2 * ot.IdentityMatrix(2)]
f_i = [f1, f2]
graph, s = drawIFS(f_i, skip = 100, iterations = 100000, batch_size = 1, name="Spiral", color="blue")
print("Box counting dimension=%.3f" % s)
view = viewer.View(graph)

# %%
# Fern
f1 = [[0.0]*2, ot.SquareMatrix(2, [0.0, 0.0, 0.0, 0.16])]
f2 = [[0.0, 1.6], ot.SquareMatrix(2, [0.85, 0.04, -0.04, 0.85])]
f3 = [[0.0, 1.6], ot.SquareMatrix(2, [0.2, -0.26, 0.23, 0.22])]
f4 = [[0.0, 0.44], ot.SquareMatrix(2, [-0.15, 0.28, 0.26, 0.24])]
f_i = [f1, f2, f3, f4]
Example #21
def test_model(myModel, test_partial_grad=True, x1=None, x2=None):

    inputDimension = myModel.getInputDimension()
    dimension = myModel.getOutputDimension()

    if x1 is None and x2 is None:
        x1 = ot.Point(inputDimension)
        x2 = ot.Point(inputDimension)
        for j in range(inputDimension):
            x1[j] = -1.0 - j
            x2[j] = 3.0 + 2.0 * j
    else:
        x1 = ot.Point(x1)
        x2 = ot.Point(x2)

    if myModel.isStationary():
        ott.assert_almost_equal(myModel(x1 - x2), myModel(x1, x2), 1e-14,
                                1e-14)
        ott.assert_almost_equal(myModel(x2 - x1), myModel(x1, x2), 1e-14,
                                1e-14)

    eps = 1e-3

    mesh = ot.IntervalMesher([7] * inputDimension).build(
        ot.Interval([-10] * inputDimension, [10] * inputDimension))

    C = myModel.discretize(mesh)
    if dimension == 1:
        # Check that discretize & computeAsScalar provide the
        # same values
        vertices = mesh.getVertices()
        for j in range(len(vertices)):
            for i in range(j, len(vertices)):
                ott.assert_almost_equal(
                    C[i, j], myModel.computeAsScalar(vertices[i], vertices[j]),
                    1e-14, 1e-14)
    else:
        # Check that discretize & operator() provide the same values
        vertices = mesh.getVertices()
        localMatrix = ot.SquareMatrix(dimension)
        for j in range(len(vertices)):
            for i in range(j, len(vertices)):
                for localJ in range(dimension):
                    for localI in range(dimension):
                        localMatrix[localI, localJ] = C[i * dimension + localI,
                                                        j * dimension + localJ]
                ott.assert_almost_equal(localMatrix,
                                        myModel(vertices[i], vertices[j]),
                                        1e-14, 1e-14)

    # Now we suppose that discretize is ok
    # we look at crossCovariance of (vertices, vertices) which should return the same values
    C.getImplementation().symmetrize()
    crossCov = myModel.computeCrossCovariance(vertices, vertices)
    ott.assert_almost_equal(
        crossCov, C, 1e-14, 1e-14,
        "in " + myModel.getClassName() + "::computeCrossCovariance")

    # Now crossCovariance(sample, sample) is ok
    # Let us validate crossCovariance(Sample, point) with 1st column(s) of previous calculations
    crossCovSamplePoint = myModel.computeCrossCovariance(vertices, vertices[0])
    crossCovCol = crossCov.reshape(crossCov.getNbRows(), dimension)
    ott.assert_almost_equal(
        crossCovSamplePoint, crossCovCol, 1e-14, 1e-14,
        "in " + myModel.getClassName() + "::computeCrossCovarianceSamplePoint")

    if test_partial_grad:
        grad = myModel.partialGradient(x1, x2)

        if (dimension == 1):
            gradfd = ot.Matrix(inputDimension, 1)
            for j in range(inputDimension):
                x1_g = ot.Point(x1)
                x1_d = ot.Point(x1)
                x1_g[j] = x1_d[j] + eps
                x1_d[j] = x1_d[j] - eps
                gradfd[j, 0] = (myModel.computeAsScalar(x1_g, x2) -
                                myModel.computeAsScalar(x1_d, x2)) / (2 * eps)
        else:
            gradfd = ot.Matrix(inputDimension, dimension * dimension)
            covarianceX1X2 = myModel(x1, x2)
            centralValue = ot.Point(covarianceX1X2.getImplementation())
            # Loop over the shifted points
            for i in range(inputDimension):
                currentPoint = ot.Point(x1)
                currentPoint[i] += eps
                localCovariance = myModel(currentPoint, x2)
                currentValue = ot.Point(localCovariance.getImplementation())
                for j in range(currentValue.getSize()):
                    gradfd[i, j] = (currentValue[j] - centralValue[j]) / eps

        ott.assert_almost_equal(grad, gradfd, 1e-5, 1e-5,
                                "in " + myModel.getClassName() + " grad")
modelIshigami = ot.SymbolicFunction(
    ["X1", "X2", "X3"], ["sin(X1) + 5.0 * (sin(X2))^2 + 0.1 * X3^4 * sin(X1)"])

# Output
Y = modelIshigami(X)

# Using the same covariance model for each marginal
Cov1 = ot.SquaredExponential(1)

# Output covariance model
Cov2 = ot.SquaredExponential(1)

# Set output covariance scale
Cov2.setScale(Y.computeStandardDeviation())

# This is the GSA-type estimator: the weight matrix is the identity.
W = ot.SquareMatrix(size)
for i in range(size):
    W[i, i] = 1.0

# Using a biased estimator
estimatorTypeV = ot.HSICVStat()

# Loop over marginals
hsicIndexRef = [0.02331323, 0.00205350, 0.00791711]
for i in range(3):
    test = X.getMarginal(i)
    # Set input covariance scale
    Cov1.setScale(test.computeStandardDeviation())
    hsicIndex = estimatorTypeV.computeHSICIndex(test, Y, Cov1, Cov2, W)
    ott.assert_almost_equal(hsicIndex, hsicIndexRef[i])
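
# For reference, recent OpenTURNS releases (>= 1.17) wrap this computation in a
# dedicated estimator class; a sketch, assuming the HSICEstimatorGlobalSensitivity API:
covarianceModelCollection = []
for i in range(3):
    CovX = ot.SquaredExponential(1)
    CovX.setScale(X.getMarginal(i).computeStandardDeviation())
    covarianceModelCollection.append(CovX)
covarianceModelCollection.append(Cov2)
globalHSIC = ot.HSICEstimatorGlobalSensitivity(
    covarianceModelCollection, X, Y, estimatorTypeV)
print('HSIC indices:', globalHSIC.getHSICIndices())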
Example #23
    def run(self):
        """
        Launch the algorithm and build the POD models.

        Notes
        -----
        This method launches the iterative algorithm. First the censored data
        are filtered if needed. The Box Cox transformation is performed if it is
        enabled. Then the enrichment of the design of experiments is performed.
        Once the algorithm stops, it builds the POD models: conditional samples
        are simulated for each defect size, then the distributions of the
        probability estimator (for MC simulation) are built. Finally, a sample
        of this distribution is used to compute the mean POD and the POD at the
        confidence level.
        """

        # Create an initial uniform distribution if not given
        if self._distribution is None:
            inputMin = self._input.getMin()
            inputMin[0] = np.min(self._defectSizes)
            inputMax = self._input.getMax()
            inputMax[0] = np.max(self._defectSizes)
            marginals = [ot.Uniform(inputMin[i], inputMax[i]) for i in range(self._dim)]
            self._distribution = ot.ComposedDistribution(marginals)

        # Create the design of experiments of the candidate points where the
        # criterion is computed
        if self._distribution.hasIndependentCopula():
            # without copula use low discrepancy experiment as first doe
            doeCandidate = ot.LowDiscrepancyExperiment(ot.SobolSequence(), 
                            self._distribution, self._candidateSize).generate()
        else:
            # else simple Monte Carlo distribution
            doeCandidate = self._distribution.getSample(self._candidateSize)

        # build initial kriging model
        # build the kriging model without optimization
        algoKriging, transformation = self._buildKrigingAlgo(self._input, self._signals)
        if self._verbose:
            print('Building the kriging model')
            print('Optimization of the covariance model parameters...')

        llDim = algoKriging.getReducedLogLikelihoodFunction().getInputDimension()
        lowerBound = [0.001] * llDim
        upperBound = [50] * llDim
        algoKriging = self._estimKrigingTheta(algoKriging,
                                              lowerBound, upperBound,
                                              self._initialStartSize)
        algoKriging.run()

        # Get kriging results
        self._krigingResult = algoKriging.getResult() 
        self._covarianceModel = self._krigingResult.getCovarianceModel()
        self._basis = self._krigingResult.getBasisCollection()
        metamodel = ot.ComposedFunction(self._krigingResult.getMetaModel(), transformation)

        self._Q2 = self._computeQ2(self._input, self._signals, self._krigingResult, transformation)
        if self._verbose:
            print('Kriging validation Q2 (>0.9): {:0.4f}\n'.format(self._Q2))

        plt.ion()
        # Start the improvement loop
        iteration = 0
        while iteration < self._nIteration:
            iteration += 1
            if self._verbose:
                print('Iteration : {}/{}'.format(iteration, self._nIteration))

            # compute POD (ptrue = pn-1) for bias reduction in the criterion,
            # using Monte Carlo over all defect sizes in a vectorized way.
            # get a sample of all parameters except the defect size
            samplePred = self._distribution.getSample(self._samplingSize)[:,1:]
            fullSamplePred = ot.Sample(self._samplingSize * self._defectNumber,
                                                self._dim)
            # Add the defect sizes as the first component
            for i, defect in enumerate(self._defectSizes):
                fullSamplePred[self._samplingSize*i:self._samplingSize*(i+1), :] = \
                                        self._mergeDefectInX(defect, samplePred)
            meanPredictionSample = metamodel(fullSamplePred)
            meanPredictionSample = np.reshape(meanPredictionSample, (self._samplingSize,
                                                    self._defectNumber), 'F')
            # compute the POD for all defect sizes
            currentPOD = np.mean(meanPredictionSample > self._detectionBoxCox, axis=0)

            # Compute the criterion for all candidates in the candidate doe
            criterion = np.inf
            for icand, candidate in enumerate(doeCandidate):

                # add the current candidate to the kriging doe
                inputAugmented = self._input[:]
                inputAugmented.add(candidate)
                signalsAugmented = self._signals[:]
                # predict the signal value of the candidate using the current
                # kriging model
                signalsAugmented.add(metamodel(candidate))
                # create a temporary kriging model with the new doe and without
                # updating the covariance model parameters

                # normalization
                mean = inputAugmented.computeMean()
                try:
                    stddev = inputAugmented.computeStandardDeviation()
                except AttributeError:
                    stddev = inputAugmented.computeStandardDeviationPerComponent()
                linear = ot.SquareMatrix(self._dim)
                for j in range(self._dim):
                    linear[j, j] = 1.0 / stddev[j] if abs(stddev[j]) > 1e-12 else 1.0
                zero = [0.0] * self._dim
                transformation = ot.LinearFunction(mean, zero, linear)

                algoKrigingTemp = ot.KrigingAlgorithm(transformation(inputAugmented), signalsAugmented,
                                                      self._covarianceModel,
                                                      self._basis)
                optimizer = algoKrigingTemp.getOptimizationAlgorithm()
                optimizer.setMaximumIterationNumber(0)
                algoKrigingTemp.setOptimizationAlgorithm(optimizer)
                algoKrigingTemp.run()
                krigingResultTemp = algoKrigingTemp.getResult()

                # compute the criterion for all defect size
                crit = []
                # save results, used to compute the PODModel and PODCLModel
                PODPerDefect = ot.Sample(self._simulationSize *
                                         self._samplingSize, self._defectNumber)
                for idef, defect in enumerate(self._defectSizes):
                    podSample = self._computePODSamplePerDefect(defect,
                        self._detectionBoxCox, krigingResultTemp, transformation,
                        self._distribution, self._simulationSize, self._samplingSize)
                    PODPerDefect[:, idef] = podSample

                    meanPOD = podSample.computeMean()[0]
                    varPOD = podSample.computeVariance()[0]
                    crit.append(varPOD + (meanPOD - currentPOD[idef])**2)
                # compute the criterion aggregated for all defect sizes
                newCriterion = np.sqrt(np.mean(crit))

                # check if the result is better or not
                if newCriterion < criterion:
                    self._PODPerDefect = PODPerDefect
                    criterion = newCriterion
                    indexOpt = icand
                
                if self._verbose:
                    updateProgress(icand, int(doeCandidate.getSize()), 'Computing criterion')

            # get the best candidate
            candidateOpt = doeCandidate[indexOpt]
            # add new point to DOE
            self._input.add(candidateOpt)
            # add the signal computed by the physical model
            if self._boxCox:
                self._signals.add(self._boxCoxTransform(self._physicalModel(candidateOpt) + [self._shift]))
            else:
                self._signals.add(self._physicalModel(candidateOpt))
            # remove added candidate from the doeCandidate
            doeCandidate.erase(indexOpt)
            if self._verbose:
                print('Criterion value : {:0.4f}'.format(criterion))
                print('Added point : {}'.format(candidateOpt))
                print('Update the kriging model')

            # update the kriging model without optimization
            algoKriging, transformation = self._buildKrigingAlgo(self._input, self._signals)
            algoKriging.setOptimizeParameters(False)
            algoKriging.run()
            self._Q2 = self._computeQ2(self._input, self._signals, algoKriging.getResult(), transformation)

            # Check the quality of the kriging model if it needs optimization
            if self._Q2 < 0.95:
                if self._verbose:
                    print('Optimization of the covariance model parameters...')

                algoKriging.setOptimizeParameters(True)
                algoKriging = self._estimKrigingTheta(algoKriging,
                                                      lowerBound, upperBound,
                                                      self._initialStartSize)
                algoKriging.run()

            # Get kriging results
            self._krigingResult = algoKriging.getResult()
            self._covarianceModel = self._krigingResult.getCovarianceModel()
            self._basis = self._krigingResult.getBasisCollection()

            self._Q2 = self._computeQ2(self._input, self._signals, self._krigingResult, transformation)
            if self._verbose:
                print('Kriging validation Q2 (>0.9): {:0.4f}'.format(self._Q2))

            if self._graph:
                # create the interpolate function of the POD model
                meanPOD = self._PODPerDefect.computeMean()
                interpModel = interp1d(self._defectSizes, np.array(meanPOD), kind='linear')
                self._PODmodel = ot.PythonFunction(1, 1, interpModel)
                # The POD at confidence level is built in getPODCLModel() directly
                fig, ax = self.drawPOD(self._probabilityLevel, self._confidenceLevel)
                plt.draw()
                plt.pause(0.001)
                plt.show()
                if self._graphDirectory is not None:
                    if not os.path.exists(self._graphDirectory):
                        os.makedirs(self._graphDirectory)
                    fig.savefig(os.path.join(self._graphDirectory, 'AdaptiveSignalPOD_')+str(iteration),
                                bbox_inches='tight', transparent=True)

        # Compute the final POD with the last updated kriging model
        if self._verbose:
            print('\nStart computing the POD with the last updated kriging model')
        # compute the sample containing the POD values for all defect sizes
        self._PODPerDefect = ot.Sample(self._simulationSize *
                                       self._samplingSize, self._defectNumber)
        for i, defect in enumerate(self._defectSizes):
            self._PODPerDefect[:, i] = self._computePODSamplePerDefect(defect,
                self._detectionBoxCox, self._krigingResult, transformation,
                self._distribution, self._simulationSize, self._samplingSize)
            if self._verbose:
                updateProgress(i, self._defectNumber, 'Computing POD per defect')

        # compute the mean POD 
        meanPOD = self._PODPerDefect.computeMean()
        # create the interpolate function of the POD model
        interpModel = interp1d(self._defectSizes, np.array(meanPOD), kind='linear')
        self._PODmodel = ot.PythonFunction(1, 1, interpModel)

        # The POD at confidence level is built in getPODCLModel() directly

        # remove the interactive plotting
        plt.ioff()