Code example #1
import openturns as ot
import openturns.testing as ott


def test_two_outputs():
    f = ot.SymbolicFunction(['x'], ['x * sin(x)', 'x * cos(x)'])
    sampleX = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]
    sampleY = f(sampleX)
    basis = ot.Basis([
        ot.SymbolicFunction(['x'], ['x']),
        ot.SymbolicFunction(['x'], ['x^2'])
    ])
    covarianceModel = ot.SquaredExponential([1.0])
    covarianceModel.setActiveParameter([])
    covarianceModel = ot.TensorizedCovarianceModel([covarianceModel] * 2)
    algo = ot.KrigingAlgorithm(sampleX, sampleY, covarianceModel, basis)
    algo.run()
    result = algo.getResult()
    mm = result.getMetaModel()
    assert mm.getOutputDimension() == 2, "wrong output dim"
    ott.assert_almost_equal(mm(sampleX), sampleY)
    # Check the conditional covariance
    reference_covariance = ot.Matrix([[4.4527, 0.0, 8.34404, 0.0],
                                      [0.0, 2.8883, 0.0, 5.41246],
                                      [8.34404, 0.0, 15.7824, 0.0],
                                      [0.0, 5.41246, 0.0, 10.2375]])
    ott.assert_almost_equal(
        result([[9.5], [10.0]]).getCovariance() - reference_covariance,
        ot.Matrix(4, 4), 0.0, 2e-2)
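A side note on the setActiveParameter([]) call above: it empties the list of covariance-model parameters exposed to the optimizer, so KrigingAlgorithm keeps the given hyperparameters fixed instead of estimating them. A minimal sketch of that mechanism (assuming only that openturns is installed):

import openturns as ot

model = ot.SquaredExponential([1.0])
print(model.getActiveParameter())  # indices of the parameters the optimizer may tune
model.setActiveParameter([])       # freeze everything: nothing left to estimate
print(model.getActiveParameter())  # empty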
Code example #2
def runPCA(self):
    # Perform PCA
    data = np.array(self.sample).T
    # Center each column (zero column mean)
    columnmean = data.mean(axis=0)
    for i in range(self.verticesNumber):
        data[:, i] = data[:, i] - columnmean[i]
    # Compute the SVD of the matrix
    mymatrix = ot.Matrix(data)
    singular_values, U, VT = mymatrix.computeSVD(True)
    V = VT.transpose()
    # Truncate
    VL = V[:, 0:self.numberOfComponents]
    # Project
    self.principalComponents = np.array(mymatrix * VL)
    # Compute explained variance
    explained_variance = ot.Point(self.verticesNumber)
    for i in range(self.verticesNumber):
        explained_variance[i] = singular_values[i]**2
    n_samples = self.processSample.getSize()
    explained_variance /= n_samples - 1
    # Compute total variance
    total_var = explained_variance.norm1()
    # Compute explained variance ratio
    explained_variance_ratio = explained_variance / total_var
    # Truncate
    self.explained_variance_ratio = explained_variance_ratio[0:self.numberOfComponents]
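For reference, a standalone sketch of the computeSVD call used above, on a small made-up matrix: with the flag set to True, the full U and V^T factors are returned together with the singular values.

import openturns as ot

A = ot.Matrix([[2.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
singular_values, U, VT = A.computeSVD(True)
print(singular_values)                    # sorted in decreasing order
print(U.getNbRows(), U.getNbColumns())    # 3 x 3 for a full SVD
print(VT.getNbRows(), VT.getNbColumns())  # 2 x 2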
Code example #3
    def _PODgaussModelCl(self, defects, intercept, slope, stderr, detection):
        class buildPODModel():
            def __init__(self, intercept, slope, sigmaEpsilon, detection):

                self.intercept = intercept
                self.slope = slope
                self.sigmaEpsilon = sigmaEpsilon
                self.detection = detection

            def PODmodel(self, x):
                t = (self.detection -
                     (self.intercept + self.slope * x)) / self.sigmaEpsilon
                return ot.DistFunc.pNormal(t, True)

        N = defects.getSize()
        X = ot.Sample(N, [1, 0])
        X[:, 1] = defects
        X = ot.Matrix(X)
        covMatrix = X.computeGram(True).solveLinearSystem(ot.IdentityMatrix(2))
        sampleNormal = ot.Normal([0, 0],
                                 ot.CovarianceMatrix(
                                     covMatrix.getImplementation())).getSample(
                                         self._simulationSize)
        sampleSigmaEpsilon = (ot.Chi(N - 2).inverse() * np.sqrt(N - 2) *
                              stderr).getSample(self._simulationSize)

        PODcoll = []
        for i in range(self._simulationSize):
            sigmaEpsilon = sampleSigmaEpsilon[i][0]
            interceptSimu = sampleNormal[i][0] * sigmaEpsilon + intercept
            slopeSimu = sampleNormal[i][1] * sigmaEpsilon + slope
            PODcoll.append(
                buildPODModel(interceptSimu, slopeSimu, sigmaEpsilon,
                              detection).PODmodel)
        return PODcoll
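A note on the sampleSigmaEpsilon line above: if stderr is the residual standard error of a regression with N - 2 degrees of freedom, then (N - 2) * stderr^2 / sigma^2 follows a chi-square distribution with N - 2 degrees of freedom, so sigma = stderr * sqrt(N - 2) / X with X ~ Chi(N - 2). That is what ot.Chi(N - 2).inverse() * np.sqrt(N - 2) * stderr samples: inverse() gives the distribution of 1/X, and the scalar factors rescale it.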
Code example #4
def convert_hmat_to_matrix(hmat):
    res = ot.Matrix(hmat.getNbRows(), hmat.getNbColumns())
    for j in range(hmat.getNbColumns()):
        # Multiply by the j-th canonical basis vector to extract column j
        x = ot.Point(hmat.getNbColumns())
        x[j] = 1.0
        y = ot.Point(hmat.getNbRows())
        hmat.gemv('N', 1.0, x, 0.0, y)  # y = 1.0 * hmat * x + 0.0 * y
        for i in range(hmat.getNbRows()):
            res[i, j] = y[i]
    return res
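The loop above reconstructs the dense matrix one column at a time, since an HMatrix only exposes matrix-vector products. The identity it relies on, shown here on a plain ot.Matrix with made-up values: multiplying by the j-th canonical basis vector extracts the j-th column.

import openturns as ot

A = ot.Matrix([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
e1 = ot.Point([0.0, 1.0])  # second canonical basis vector of R^2
print(A * e1)              # [2, 4, 6]: the second column of A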
Code example #5
def gradient(self, point):
    shape = np.array(point).shape
    # Check passed argument (should be a 1D array)
    if len(shape) == 1 and shape[0] == self.__inputs_dimension:
        result = self.__p7_model.grad(point)
        self.__calls_number += 1
        return ot.Matrix(result).transpose()
    else:
        raise ValueError('Wrong shape %s of input array, required: (%s,)' %
                         (shape, self.__inputs_dimension))
Code example #6
def computeDurbinWatsonTest(x, residuals, hypothesis="Equal"):
    # Parameters:
    # hypothesis : string
    #    "Equal"   : two-sided test (alternative: autocorrelation is not 0)
    #    "Less"    : one-sided test (alternative: autocorrelation is less than 0)
    #    "Greater" : one-sided test (alternative: autocorrelation is greater than 0)
    nx = x.getSize()
    dim = x.getDimension()
    residuals = np.array(residuals)
    # statistic Durbin Watson
    dw = np.sum(np.diff(np.hstack(residuals))**2) / np.sum(residuals**2)

    # Normal approximation of DW to compute the p-value
    X = ot.Matrix(nx, dim + 1)
    X[:, 0] = np.ones((nx, 1))
    X[:, 1] = x
    B = ot.Matrix(nx, dim + 1)
    B[0, 1] = x[0][0] - x[1][0]
    B[nx - 1, 1] = x[nx - 1][0] - x[nx - 2][0]
    for i in range(nx - 2):
        B[i + 1, 1] = -x[i][0] + 2 * x[i + 1][0] - x[i + 2][0]

    XtX = X.computeGram()
    XBQt = ot.SquareMatrix(XtX.solveLinearSystem(B.transpose() * X))
    P = 2 * (nx - 1) - XBQt.computeTrace()
    XBTrace = ot.SquareMatrix(XtX.solveLinearSystem(B.computeGram(),
                                                    False)).computeTrace()
    Q = 2 * (3 * nx - 4) - 2 * XBTrace + ot.SquareMatrix(
        XBQt * XBQt).computeTrace()
    dmean = P / (nx - (dim + 1))
    dvar = 2.0 / ((nx - (dim + 1)) * (nx - (dim + 1) + 2)) * (Q - P * dmean)

    # compute the pvalue with respect to hypothesis
    # Default pvalue is for hypothesis == "Equal"
    # complementary CDF of standard normal distribution
    pValue = 2 * ot.DistFunc.pNormal(np.abs(dw - dmean) / np.sqrt(dvar), True)
    if hypothesis == "Less":
        pValue = 1 - pValue / 2
    elif hypothesis == "Greater":
        pValue = pValue / 2

    return pValue
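A hypothetical call of the function above on synthetic data (the names and values are purely illustrative): x is the one-dimensional regressor sample and residuals would normally come from a previously fitted linear model.

import numpy as np
import openturns as ot

x = ot.Sample([[float(i)] for i in range(20)])
residuals = ot.Sample([[np.sin(0.5 * i)] for i in range(20)])
print(computeDurbinWatsonTest(x, residuals))             # two-sided p-value
print(computeDurbinWatsonTest(x, residuals, "Greater"))  # one-sided variant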
Code example #7
def test_model(myModel, test_grad=True, x1=None, x2=None):

    print('myModel = ',  myModel)

    spatialDimension = myModel.getInputDimension()
    dimension = myModel.getOutputDimension()
    active = myModel.getActiveParameter()
    print('active=', active)
    print('parameter=', myModel.getParameter())
    print('parameterDescription=', myModel.getParameterDescription())

    if x1 is None and x2 is None:
        x1 = ot.Point(spatialDimension)
        x2 = ot.Point(spatialDimension)
        for j in range(spatialDimension):
            x1[j] = -1.0 - j
            x2[j] = 3.0 + 2.0 * j

    eps = 1e-5
    print('myModel(', x1, ', ', x2, ')=',  repr(myModel(x1, x2)))

    grad = myModel.partialGradient(x1, x2)
    print('dCov =', repr(grad))

    if (dimension == 1):
        gradfd = ot.Point(spatialDimension)
        for j in range(spatialDimension):
            x1_d = ot.Point(x1)
            x1_d[j] = x1_d[j] + eps
            gradfd[j] = (myModel(x1_d, x2)[0, 0] - myModel(x1, x2)[0, 0]) / eps
    else:
        gradfd = ot.Matrix(spatialDimension, dimension * dimension)
        covarianceX1X2 = myModel(x1, x2)
        # Symmetrize matrix
        covarianceX1X2.getImplementation().symmetrize()
        centralValue = ot.Point(covarianceX1X2.getImplementation())
        # Loop over the shifted points
        for i in range(spatialDimension):
            currentPoint = ot.Point(x1)
            currentPoint[i] += eps
            localCovariance = myModel(currentPoint, x2)
            localCovariance.getImplementation().symmetrize()
            currentValue = ot.Point(
                localCovariance.getImplementation())
            for j in range(currentValue.getSize()):
                gradfd[i, j] = (currentValue[j] - centralValue[j]) / eps
    print('dCov (FD)=', repr(gradfd))

    if test_grad:
        pGrad = myModel.parameterGradient(x1, x2)
        precision = ot.PlatformInfo.GetNumericalPrecision()
        ot.PlatformInfo.SetNumericalPrecision(4)
        print('dCov/dP=', pGrad)
        ot.PlatformInfo.SetNumericalPrecision(precision)
Code example #8
def hyperplane(coefs):
    """
    Generate a linear NMF from its coefficients
    """
    dim = len(coefs)
    constant = [0.]
    center = [0.] * len(coefs)
    linear = ot.Matrix(1, dim)
    for i in range(dim):
        linear[0, i] = coefs[i]
    function = ot.LinearFunction(center, constant, linear)
    return function
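A hypothetical usage of hyperplane: with center and constant both zero, ot.LinearFunction reduces to the plain linear map x -> coefs . x.

import openturns as ot

f = hyperplane([2.0, 3.0])
print(f([1.0, 1.0]))  # [5] = 2*1   + 3*1
print(f([0.5, 2.0]))  # [7] = 2*0.5 + 3*2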
Code example #9
def test_model(myModel):

    print('myModel = ', myModel)

    spatialDimension = myModel.getSpatialDimension()
    dimension = myModel.getDimension()
    active = myModel.getActiveParameter()
    print('active=', active)
    print('parameter=', myModel.getParameter())
    print('parameterDescription=', myModel.getParameterDescription())

    x1 = ot.NumericalPoint(spatialDimension)
    x2 = ot.NumericalPoint(spatialDimension)
    for j in range(spatialDimension):
        x1[j] = -1.0 - j
        x2[j] = 3.0 + 2.0 * j

    eps = 1e-5
    if (dimension == 1):
        print('myModel(', x1, ', ', x2, ')=', repr(myModel(x1, x2)))

        grad = myModel.partialGradient(x1, x2)
        print('dCov =', repr(grad))
        gradfd = ot.NumericalPoint(spatialDimension)
        for j in range(spatialDimension):
            x1_d = ot.NumericalPoint(x1)
            x1_d[j] = x1_d[j] + eps
            gradfd[j] = (myModel(x1_d, x2)[0, 0] - myModel(x1, x2)[0, 0]) / eps
        print('dCov (FD)=', repr(gradfd))
    else:
        print('myModel(', x1, ', ', x2, ')=', repr(myModel(x1, x2)))

        grad = myModel.partialGradient(x1, x2)
        print('dCov =', repr(grad))

        gradfd = ot.Matrix(spatialDimension, dimension * dimension)
        covarianceX1X2 = myModel(x1, x2)
        # Symmetrize matrix
        covarianceX1X2.getImplementation().symmetrize()
        centralValue = ot.NumericalPoint(covarianceX1X2.getImplementation())
        # Loop over the shifted points
        for i in range(spatialDimension):
            currentPoint = ot.NumericalPoint(x1)
            currentPoint[i] += eps
            localCovariance = myModel(currentPoint, x2)
            localCovariance.getImplementation().symmetrize()
            currentValue = ot.NumericalPoint(
                localCovariance.getImplementation())
            for j in range(currentValue.getSize()):
                gradfd[i, j] = (currentValue[j] - centralValue[j]) / eps
        print('dCov (FD)=', repr(gradfd))
Code example #10
def _PODgaussModel(self, defects, stderr, linearModel):
    X = ot.NumericalSample(defects.getSize(), [1, 0])
    X[:, 1] = defects
    X = ot.Matrix(X)

    # compute the prediction variance of the linear regression model
    def predictionVariance(x):
        Y = ot.NumericalPoint([1.0, x])
        gramX = X.computeGram()
        return stderr**2 * (1. + ot.dot(Y, gramX.solveLinearSystem(Y)))

    # function to compute the POD(defect)
    def PODmodel(x):
        t = (self._detectionBoxCox - linearModel(x[0])) / \
            np.sqrt(predictionVariance(x[0]))
        # DistFunc.pNormal(t, True) = complementary CDF of the Normal(0, 1)
        return [ot.DistFunc.pNormal(t, True)]

    return PODmodel
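For reference, predictionVariance implements the textbook prediction variance of a linear regression at the new regressor vector Y = (1, x): Var = stderr^2 * (1 + Y^T (X^T X)^{-1} Y). The gramX.solveLinearSystem(Y) call applies (X^T X)^{-1} to Y without ever forming the inverse, which is cheaper and numerically safer than inverting the Gram matrix explicitly.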
Code example #11
def test_one_input_one_output():
    sampleSize = 6
    dimension = 1

    f = ot.SymbolicFunction(['x0'], ['x0 * sin(x0)'])

    X = ot.Sample(sampleSize, dimension)
    X2 = ot.Sample(sampleSize, dimension)
    for i in range(sampleSize):
        X[i, 0] = 3.0 + i
        X2[i, 0] = 2.5 + i
    X[0, 0] = 1.0
    X[1, 0] = 3.0
    X2[0, 0] = 2.0
    X2[1, 0] = 4.0
    Y = f(X)
    Y2 = f(X2)

    # create algorithm
    basis = ot.ConstantBasisFactory(dimension).build()
    covarianceModel = ot.SquaredExponential([1e-02], [4.50736])

    algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis)
    algo.run()

    # perform an evaluation
    result = algo.getResult()

    ott.assert_almost_equal(result.getMetaModel()(X), Y)
    ott.assert_almost_equal(result.getResiduals(), [1.32804e-07], 1e-3, 1e-3)
    ott.assert_almost_equal(result.getRelativeErrors(), [5.20873e-21])

    # Kriging variance is 0 on learning points
    covariance = result.getConditionalCovariance(X)
    ott.assert_almost_equal(covariance,
                            ot.Matrix(sampleSize, sampleSize),
                            8.95e-7, 8.95e-7)

    # Covariance per marginal & extract variance component
    coll = result.getConditionalMarginalCovariance(X)
    var = [mat[0, 0] for mat in coll]
    ott.assert_almost_equal(var, [0]*sampleSize, 1e-14, 1e-14)

    # Variance per marginal
    var = result.getConditionalMarginalVariance(X)
    ott.assert_almost_equal(var, ot.Point(sampleSize), 1e-14, 1e-14)
Code example #12
    def getQ2(self):
        """
        Accessor to the Q2 value. 

        Returns
        -------
        Q2 : float
            The Q2 value computed analytically.
        """
        basisMatrix = ot.Matrix(self._basisFunction(self._input))
        gramBasis = basisMatrix.computeGram()
        H = basisMatrix * gramBasis.solveLinearSystem(basisMatrix.transpose())
        Hdiag = np.vstack(np.array(H).diagonal())
        fittedSignals = np.array(self._chaosPred(self._input))
        delta = np.array(self._signals - fittedSignals) / (1. - Hdiag)

        return 1 - np.mean(delta**2) / self._signals.computeVariance()[0]
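The formula behind getQ2 is the closed-form leave-one-out residual of a model that is linear in its coefficients: with hat matrix H = B (B^T B)^{-1} B^T, the leave-one-out residual at point i equals (y_i - yhat_i) / (1 - H_ii). Q2 = 1 - mean(delta^2) / Var(y) therefore gives the cross-validation score without refitting the model once per point.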
Code example #13
def test_model(myModel):

    print("myModel = ",  myModel)

    spatialDimension = myModel.getSpatialDimension()
    dimension = myModel.getDimension()

    x1 = ot.NumericalPoint(spatialDimension)
    x2 = ot.NumericalPoint(spatialDimension)
    for j in range(spatialDimension):
        x1[j] = -1.0 - j
        x2[j] = 3.0 + 2.0 * j

    eps = 1e-5
    if (dimension == 1):
        print("myModel(", x1, ", ", x2, ")=",  myModel(x1, x2))

        grad = myModel.partialGradient(x1, x2)
        print("dCov =", grad)
        gradfd = ot.NumericalPoint(spatialDimension)
        for j in range(spatialDimension):
            x1_d = ot.NumericalPoint(x1)
            x1_d[j] = x1_d[j] + eps
            gradfd[j] = (myModel(x1_d, x2)[0, 0] - myModel(x1, x2)[0, 0]) / eps
        print("dCov (FD)=", gradfd)
    else:
        print("myModel(", x1, ", ", x2, ")=",  repr(myModel(x1, x2)))

        grad = myModel.partialGradient(x1, x2)
        print("dCov =", repr(grad))

        gradfd = ot.Matrix(spatialDimension, dimension * dimension)
        covarianceX1X2 = myModel(x1, x2)
        # Symmetrize matrix
        covarianceX1X2.getImplementation().symmetrize()
        centralValue = ot.NumericalPoint(covarianceX1X2.getImplementation())
        # Loop over the shifted points
        for i in range(spatialDimension):
            currentPoint = ot.NumericalPoint(x1)
            currentPoint[i] += eps
            localCovariance = myModel(currentPoint, x2)
            localCovariance.getImplementation().symmetrize()
            currentValue = ot.NumericalPoint(localCovariance.getImplementation())
            for j in range(currentValue.getSize()):
                gradfd[i, j] = (currentValue[j] - centralValue[j]) / eps
        print("dCov (FD)=", repr(gradfd))
Code example #14
sampler.setBurnIn(2000)

# get a realization
realization = sampler.getRealization()
print('y1=', realization)

# try to generate a sample
sampleSize = 1000
sample = sampler.getSample(sampleSize)

x_mu = sample.computeMean()
x_sigma = sample.computeStandardDeviation()

# compute covariance
x_cov = sample.computeCovariance()
P = ot.Matrix(obsSize, chainDim)
for i in range(obsSize):
    for j in range(chainDim):
        P[i, j] = p[i, j]
Qn = P.transpose() * P + Q0
Qn_inv = ot.SquareMatrix(chainDim)
for j in range(chainDim):
    I_j = [0] * chainDim
    I_j[j] = 1.0
    Qn_inv_j = Qn.solveLinearSystem(I_j)
    for i in range(chainDim):
        Qn_inv[i, j] = Qn_inv_j[i]

sigma_exp = [0] * chainDim
for i in range(chainDim):
    sigma_exp[i] = m.sqrt(Qn_inv[i, i])
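The column-by-column loop above builds Qn^{-1} one unit vector at a time. A shorter equivalent (a sketch reusing Qn and chainDim from the snippet) solves against an identity matrix in a single call, the same pattern used with computeGram elsewhere on this page:

Qn_inv_direct = Qn.solveLinearSystem(ot.IdentityMatrix(chainDim))
print(Qn_inv_direct)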
Code example #15
def test_one_input_one_output():
    sampleSize = 6
    dimension = 1

    f = ot.SymbolicFunction(['x0'], ['x0 * sin(x0)'])

    X = ot.Sample(sampleSize, dimension)
    X2 = ot.Sample(sampleSize, dimension)
    for i in range(sampleSize):
        X[i, 0] = 3.0 + i
        X2[i, 0] = 2.5 + i
    X[0, 0] = 1.0
    X[1, 0] = 3.0
    X2[0, 0] = 2.0
    X2[1, 0] = 4.0
    Y = f(X)
    Y2 = f(X2)

    # create covariance model
    basis = ot.ConstantBasisFactory(dimension).build()
    covarianceModel = ot.SquaredExponential()

    # create algorithm
    algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis)

    # set sensible optimization bounds and estimate hyperparameters
    algo.setOptimizationBounds(ot.Interval(X.getMin(), X.getMax()))
    algo.run()

    # perform an evaluation
    result = algo.getResult()

    ott.assert_almost_equal(result.getMetaModel()(X), Y)
    ott.assert_almost_equal(result.getResiduals(), [1.32804e-07], 1e-3, 1e-3)
    ott.assert_almost_equal(result.getRelativeErrors(), [5.20873e-21])

    # Kriging variance is 0 on learning points
    covariance = result.getConditionalCovariance(X)
    nullMatrix = ot.Matrix(sampleSize, sampleSize)
    ott.assert_almost_equal(covariance, nullMatrix, 0.0, 1e-13)

    # Kriging variance is non-null on validation points
    validCovariance = result.getConditionalCovariance(X2)
    values = ot.Matrix([
        [0.81942182, -0.35599947, -0.17488593, 0.04622401, -0.03143555, 0.04054783],
        [-0.35599947, 0.20874735, 0.10943841, -0.03236419, 0.02397483, -0.03269184],
        [-0.17488593, 0.10943841, 0.05832917, -0.01779918, 0.01355719, -0.01891618],
        [0.04622401, -0.03236419, -0.01779918, 0.00578327, -0.00467674, 0.00688697],
        [-0.03143555, 0.02397483, 0.01355719, -0.00467674, 0.0040267, -0.00631173],
        [0.04054783, -0.03269184, -0.01891618, 0.00688697, -0.00631173, 0.01059488]])
    ott.assert_almost_equal(validCovariance - values, nullMatrix, 0.0, 1e-7)

    # Covariance per marginal & extract variance component
    coll = result.getConditionalMarginalCovariance(X)
    var = [mat[0, 0] for mat in coll]
    ott.assert_almost_equal(var, [0] * sampleSize, 1e-14, 1e-13)

    # Variance per marginal
    var = result.getConditionalMarginalVariance(X)
    ott.assert_almost_equal(var, ot.Sample(sampleSize, 1), 1e-14, 1e-13)

    # Prediction accuracy
    ott.assert_almost_equal(Y2, result.getMetaModel()(X2), 0.3, 0.0)
Code example #17
File: t_Study_saveload.py (project: adutfoy/openturns)
    p4[0] = 102.
    p4[1] = 202.
    s1[2] = p4
    myStudy.add('mySample', s1)

    # Add a point with a description
    pDesc = ot.PointWithDescription(p1)
    desc = pDesc.getDescription()
    desc[0] = 'x'
    desc[1] = 'y'
    desc[2] = 'z'
    pDesc.setDescription(desc)
    myStudy.add(pDesc)

    # Add a matrix
    matrix = ot.Matrix(2, 3)
    matrix[0, 0] = 0
    matrix[0, 1] = 1
    matrix[0, 2] = 2
    matrix[1, 0] = 3
    matrix[1, 1] = 4
    matrix[1, 2] = 5
    myStudy.add('m', matrix)

    # Create a Point that we will try to reinstantiate after reloading
    point = ot.Point(2, 1000.)
    point.setName('point')
    myStudy.add('point', point)

    # Create a Simulation::Result
    simulationResult = ot.ProbabilitySimulationResult(ot.ThresholdEvent(), 0.5,
Code example #18
from __future__ import print_function
import openturns as ot

t_names = [
    'Matrix', 'SquareMatrix', 'TriangularMatrix', 'SymmetricMatrix',
    'CovarianceMatrix', 'CorrelationMatrix'
]
t_names.extend([
    'ComplexMatrix', 'HermitianMatrix', 'TriangularComplexMatrix',
    'SquareComplexMatrix'
])

for i, iname in enumerate(t_names):

    # try conversion
    ref = ot.Matrix([[1.0, 0.0], [0.0, 0.5]])
    a = getattr(ot, iname)(ref)
    print('a=', a)

    # try scalar mul
    try:
        s = 5.
        ats = a * s
        print('a*s=', ats)
        sta = s * a
        print('s*a=', sta)
    except:
        print('no scalar mul for', iname)

    # try scalar div
    try:
        atd = a / s
        print('a/s=', atd)
    except:
        print('no scalar div for', iname)
Code example #19
sampleSize = 3

X = ot.Sample(sampleSize, 1)
for i in range(sampleSize):
    X[i, 0] = i + 1.0

Y = ot.Sample(sampleSize, 1)

phis = []
for j in range(basisSize):
    phis.append(ot.SymbolicFunction(['x'], ['x^' + str(j + 1)]))
basis = ot.Basis(phis)
for i in range(basisSize):
    print(ot.FunctionCollection(basis)[i](X))

proxy = ot.DesignProxy(X, basis)
full = range(basisSize)

design = proxy.computeDesign(full)
print(design)

proxy.setWeight([0.5] * sampleSize)
design = proxy.computeDesign(full)
print(design)

proxy = ot.DesignProxy(ot.Matrix(design))
full = range(basisSize)

design = proxy.computeDesign(full)
print(design)
Code example #20
def test_model(myModel, test_partial_grad=True, x1=None, x2=None):

    inputDimension = myModel.getInputDimension()
    dimension = myModel.getOutputDimension()

    if x1 is None and x2 is None:
        x1 = ot.Point(inputDimension)
        x2 = ot.Point(inputDimension)
        for j in range(inputDimension):
            x1[j] = -1.0 - j
            x2[j] = 3.0 + 2.0 * j
    else:
        x1 = ot.Point(x1)
        x2 = ot.Point(x2)

    if myModel.isStationary():
        ott.assert_almost_equal(myModel(x1 - x2), myModel(x1, x2), 1e-14,
                                1e-14)
        ott.assert_almost_equal(myModel(x2 - x1), myModel(x1, x2), 1e-14,
                                1e-14)

    eps = 1e-3

    mesh = ot.IntervalMesher([9] * inputDimension).build(
        ot.Interval([-10] * inputDimension, [10] * inputDimension))

    C = myModel.discretize(mesh)
    if dimension == 1:
        # Check that discretize & computeAsScalar provide the
        # same values
        vertices = mesh.getVertices()
        for j in range(len(vertices)):
            for i in range(j, len(vertices)):
                ott.assert_almost_equal(
                    C[i, j], myModel.computeAsScalar(vertices[i], vertices[j]),
                    1e-14, 1e-14)
    else:
        # Check that discretize & operator() provide the
        # same values
        vertices = mesh.getVertices()
        localMatrix = ot.SquareMatrix(dimension)
        for j in range(len(vertices)):
            for i in range(j, len(vertices)):
                for localJ in range(dimension):
                    for localI in range(dimension):
                        localMatrix[localI, localJ] = C[i * dimension + localI,
                                                        j * dimension + localJ]
                ott.assert_almost_equal(localMatrix,
                                        myModel(vertices[i], vertices[j]),
                                        1e-14, 1e-14)

    if test_partial_grad:
        grad = myModel.partialGradient(x1, x2)

        if (dimension == 1):
            gradfd = ot.Matrix(inputDimension, 1)
            for j in range(inputDimension):
                x1_g = ot.Point(x1)
                x1_d = ot.Point(x1)
                x1_g[j] = x1_d[j] + eps
                x1_d[j] = x1_d[j] - eps
                gradfd[j, 0] = (myModel.computeAsScalar(x1_g, x2) -
                                myModel.computeAsScalar(x1_d, x2)) / (2 * eps)
        else:
            gradfd = ot.Matrix(inputDimension, dimension * dimension)
            covarianceX1X2 = myModel(x1, x2)
            centralValue = ot.Point(covarianceX1X2.getImplementation())
            # Loop over the shifted points
            for i in range(inputDimension):
                currentPoint = ot.Point(x1)
                currentPoint[i] += eps
                localCovariance = myModel(currentPoint, x2)
                currentValue = ot.Point(localCovariance.getImplementation())
                for j in range(currentValue.getSize()):
                    gradfd[i, j] = (currentValue[j] - centralValue[j]) / eps

        ott.assert_almost_equal(grad, gradfd, 1e-5, 1e-5,
                                "in " + myModel.getClassName() + " grad")
Code example #21
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
from math import sqrt

domain = ot.Interval(-1.0, 1.0)
basis = ot.OrthogonalProductFunctionFactory([ot.FourierSeriesFactory()])
basisSize = 10
experiment = ot.GaussProductExperiment(basis.getMeasure(), [20])
mustScale = False
threshold = 0.001
factory = ot.KarhunenLoeveQuadratureFactory(domain, experiment, basis,
                                            basisSize, mustScale, threshold)
model = ot.AbsoluteExponential([1.0])
ev = ot.NumericalPoint()
functions = factory.build(model, ev)
g = ot.Graph()
g.setXTitle("$t$")
g.setYTitle(r"$\sqrt{\lambda_n}\phi_n$")
for i in range(functions.getSize()):
    g.add((functions.build(i) * ot.LinearNumericalMathFunction(
        ot.NumericalPoint(domain.getDimension()),
        ot.NumericalPoint(1, sqrt(ev[i])), ot.Matrix(
            1, domain.getDimension()))).draw(-1.0, 1.0, 256))
g.setColors(ot.Drawable.BuildDefaultPalette(functions.getSize()))

fig = plt.figure(figsize=(6, 4))
plt.suptitle("P1 approx. of KL expansion for $C(s,t)=e^{-|s-t|}$")
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
View(g, figure=fig, axes=[axis], add_legend=False)
Code example #22
#! /usr/bin/env python

import openturns as ot
import pickle
from io import BytesIO

obj_list = []
obj_list.append(ot.Point([1.6, -8.7]))
obj_list.append(ot.Sample([[4.6, -3.7], [8.4, 6.3]]))
obj_list.append(ot.Description(['x', 'y', 'z']))
obj_list.append(ot.Indices([1, 2, 4]))
obj_list.append(ot.Matrix([[1, 2], [3, 4]]))
obj_list.append(ot.SymbolicFunction(['x1', 'x2'], ['y1=x1+x2']))

src = BytesIO()

for obj in obj_list:
    pickle.dump(obj, src)

src.seek(0)

for obj in obj_list:
    obj2 = pickle.load(src)
    print('object: ' + str(obj))
    print('same: ' + str(obj2 == obj) + '\n')
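The same round trip also works in memory with pickle.dumps / pickle.loads; a quick sketch:

blob = pickle.dumps(ot.Matrix([[1.0, 2.0], [3.0, 4.0]]))
print(pickle.loads(blob))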
Code example #23
def test_model(myModel, test_partial_grad=True, x1=None, x2=None):

    inputDimension = myModel.getInputDimension()
    dimension = myModel.getOutputDimension()

    if x1 is None and x2 is None:
        x1 = ot.Point(inputDimension)
        x2 = ot.Point(inputDimension)
        for j in range(inputDimension):
            x1[j] = -1.0 - j
            x2[j] = 3.0 + 2.0 * j
    else:
        x1 = ot.Point(x1)
        x2 = ot.Point(x2)

    if myModel.isStationary():
        ott.assert_almost_equal(myModel(x1 - x2), myModel(x1, x2), 1e-14,
                                1e-14)
        ott.assert_almost_equal(myModel(x2 - x1), myModel(x1, x2), 1e-14,
                                1e-14)

    eps = 1e-3

    mesh = ot.IntervalMesher([7] * inputDimension).build(
        ot.Interval([-10] * inputDimension, [10] * inputDimension))

    C = myModel.discretize(mesh)
    if dimension == 1:
        # Check that discretize & computeAsScalar provide the
        # same values
        vertices = mesh.getVertices()
        for j in range(len(vertices)):
            for i in range(j, len(vertices)):
                ott.assert_almost_equal(
                    C[i, j], myModel.computeAsScalar(vertices[i], vertices[j]),
                    1e-14, 1e-14)
    else:
        # Check that discretize & operator() provide the same values
        vertices = mesh.getVertices()
        localMatrix = ot.SquareMatrix(dimension)
        for j in range(len(vertices)):
            for i in range(j, len(vertices)):
                for localJ in range(dimension):
                    for localI in range(dimension):
                        localMatrix[localI, localJ] = C[i * dimension + localI,
                                                        j * dimension + localJ]
                ott.assert_almost_equal(localMatrix,
                                        myModel(vertices[i], vertices[j]),
                                        1e-14, 1e-14)

    # Now we suppose that discretize is ok
    # we look at crossCovariance of (vertices, vertices) which should return the same values
    C.getImplementation().symmetrize()
    crossCov = myModel.computeCrossCovariance(vertices, vertices)
    ott.assert_almost_equal(
        crossCov, C, 1e-14, 1e-14,
        "in " + myModel.getClassName() + "::computeCrossCovariance")

    # Now crossCovariance(sample, sample) is ok
    # Let us validate crossCovariance(Sample, point) with 1st column(s) of previous calculations
    crossCovSamplePoint = myModel.computeCrossCovariance(vertices, vertices[0])
    crossCovCol = crossCov.reshape(crossCov.getNbRows(), dimension)
    ott.assert_almost_equal(
        crossCovSamplePoint, crossCovCol, 1e-14, 1e-14,
        "in " + myModel.getClassName() + "::computeCrossCovarianceSamplePoint")

    if test_partial_grad:
        grad = myModel.partialGradient(x1, x2)

        if (dimension == 1):
            gradfd = ot.Matrix(inputDimension, 1)
            for j in range(inputDimension):
                x1_g = ot.Point(x1)
                x1_d = ot.Point(x1)
                x1_g[j] = x1_d[j] + eps
                x1_d[j] = x1_d[j] - eps
                gradfd[j, 0] = (myModel.computeAsScalar(x1_g, x2) -
                                myModel.computeAsScalar(x1_d, x2)) / (2 * eps)
        else:
            gradfd = ot.Matrix(inputDimension, dimension * dimension)
            covarianceX1X2 = myModel(x1, x2)
            centralValue = ot.Point(covarianceX1X2.getImplementation())
            # Loop over the shifted points
            for i in range(inputDimension):
                currentPoint = ot.Point(x1)
                currentPoint[i] += eps
                localCovariance = myModel(currentPoint, x2)
                currentValue = ot.Point(localCovariance.getImplementation())
                for j in range(currentValue.getSize()):
                    gradfd[i, j] = (currentValue[j] - centralValue[j]) / eps

        ott.assert_almost_equal(grad, gradfd, 1e-5, 1e-5,
                                "in " + myModel.getClassName() + " grad")
Code example #24
collFactories = [
    ot.UniformFactory(),
    ot.NormalFactory(),
    ot.TriangularFactory(),
    ot.ExponentialFactory(),
    ot.GammaFactory()
]
#, TrapezoidalFactory()
result, norms = distribution.project(collFactories)
print("projections=", result)
print("norms=", norms)
# ------------------------------ Multivariate tests ------------------------------#
# 2D RandomMixture
collection = [ot.Normal(0.0, 1.0)] * 3

weightMatrix = ot.Matrix(2, 3)
weightMatrix[0, 0] = 1.0
weightMatrix[0, 1] = -2.0
weightMatrix[0, 2] = 1.0
weightMatrix[1, 0] = 1.0
weightMatrix[1, 1] = 1.0
weightMatrix[1, 2] = -3.0

# Build the RandomMixture
distribution2D = ot.RandomMixture(collection, weightMatrix)
print("distribution = ", distribution2D)
print("range = ", distribution2D.getRange())
print("mean = ", distribution2D.getMean())
print("cov = ", distribution2D.getCovariance())
print("sigma = ", distribution2D.getStandardDeviation())
distribution2D.setBlockMin(3)
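Since the three inputs are independent standard normals, the covariance printed above should equal M M^T with M the weight matrix; a one-line sanity check (a sketch):

print(weightMatrix * weightMatrix.transpose())  # should match distribution2D.getCovariance()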
Code example #25
#! /usr/bin/env python

import openturns as ot

ref_values = [[1.0, 0.0], [0.0, 0.5]]

mats = [ot.Matrix(ref_values),
        ot.SquareMatrix(ref_values),
        ot.TriangularMatrix(ref_values),
        ot.SymmetricMatrix(ref_values),
        ot.CovarianceMatrix(ref_values),
        ot.CorrelationMatrix(ref_values)]
mats.extend([
    ot.ComplexMatrix(ref_values),
    ot.HermitianMatrix(ref_values),
    ot.TriangularComplexMatrix(ref_values),
    ot.SquareComplexMatrix(ref_values)])

for a in mats:

    # try conversion
    ref = ot.Matrix([[1.0, 0.0], [0.0, 0.5]])
    iname = a.__class__.__name__
    print('a=', a)

    # try scalar mul
    try:
        s = 5.
        ats = a * s
        print('a*s=', ats)
        sta = s * a
        print('s*a=', sta)
    except:
        print('no scalar mul for', iname)
Code example #26
# .. math::
#    f : \underline{X} \mapsto \underline{\underline{A}} ( \underline{X} - \underline{b} ) + \underline{c} + \frac{1}{2} \underline{X}^T \times \underline{\underline{\underline{M}}} \times \underline{X}
#

# %%
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt
import math as m
ot.Log.Show(ot.Log.NONE)

# %%
# create a quadratic function
inputDimension = 3
outputDimension = 2
center = [1.0] * inputDimension
constant = [-1.0, 2.0]  # c
linear = ot.Matrix(inputDimension, outputDimension)  # A
quadratic = ot.SymmetricTensor(inputDimension, outputDimension)  # M
quadratic[0, 0, 1] = 3.0
function = ot.QuadraticFunction(center, constant, linear, quadratic)
x = [7.0, 8.0, 9.0]
print(function(x))

# %%
# draw y1 with x1=2.0, x2=1.0, x0 in [0, 2]
graph = ot.ParametricFunction(
    function, [1, 2], [2.0, 1.0]).getMarginal(1).draw(0.0, 2.0)
view = viewer.View(graph)
plt.show()
Code example #27
    def run(self):
        """
        Build the POD models.

        Notes
        -----
        This method build the polynomial chaos model. First the censored data
        are filtered if needed. The Box Cox transformation is performed if it is
        enabled. Then it builds the POD models, the Monte Carlo simulation is
        performed for each given defect sizes. The confidence interval is 
        computed by simulating new coefficients of the polynomial chaos, then
        Monte Carlo simulations are performed.
        """

        # run the chaos algorithm and get result if not given
        if not self._userChaos:
            if self._verbose:
                print('Start build polynomial chaos model...')
            self._algoChaos = self._buildChaosAlgo(self._input, self._signals)
            self._algoChaos.run()
            if self._verbose:
                print('Polynomial chaos model completed')
            self._chaosResult = self._algoChaos.getResult()

        # get the metamodel
        self._chaosPred = self._chaosResult.getMetaModel()
        # get the basis, coef and transformation, needed for the confidence interval
        self._chaosCoefs = self._chaosResult.getCoefficients()
        self._reducedBasis = self._chaosResult.getReducedBasis()
        self._transformation = self._chaosResult.getTransformation()
        self._basisFunction = ot.NumericalMathFunction(
            ot.NumericalMathFunction(self._reducedBasis), self._transformation)

        # compute the residuals and stderr
        inputSize = self._input.getSize()
        basisSize = self._reducedBasis.getSize()
        self._residuals = self._signals - self._chaosPred(
            self._input)  # residuals
        self._stderr = np.sqrt(
            np.sum(np.array(self._residuals)**2) / (inputSize - basisSize - 1))

        # Check the quality of the chaos model
        R2 = self.getR2()
        Q2 = self.getQ2()
        if self._verbose:
            print('Polynomial chaos validation R2 (>0.8) : {:0.4f}'.format(R2))
            print('Polynomial chaos validation Q2 (>0.8) : {:0.4f}'.format(Q2))

        # Compute the POD values for each defect sizes
        self.POD = self._computePOD(self._defectSizes, self._chaosCoefs)
        # create the interpolate function
        interpModel = interp1d(self._defectSizes, self.POD, kind='linear')
        self._PODmodel = ot.PythonFunction(1, 1, interpModel)

        ####################### confidence interval ############################
        dof = inputSize - basisSize - 1
        varEpsilon = (ot.ChiSquare(dof).inverse() * dof *
                      self._stderr**2).getRealization()[0]
        gramBasis = ot.Matrix(self._basisFunction(self._input)).computeGram()
        covMatrix = gramBasis.solveLinearSystem(
            ot.IdentityMatrix(basisSize)) * varEpsilon
        self._coefsDist = ot.Normal(
            np.hstack(self._chaosCoefs),
            ot.CovarianceMatrix(covMatrix.getImplementation()))
        coefsRandom = self._coefsDist.getSample(self._simulationSize)

        self._PODPerDefect = ot.NumericalSample(self._simulationSize,
                                                self._defectNumber)
        for i, coefs in enumerate(coefsRandom):
            self._PODPerDefect[i, :] = self._computePOD(
                self._defectSizes, coefs)
            if self._verbose:
                updateProgress(i, self._simulationSize,
                               'Computing POD per defect')
Code example #28
import openturns as ot
from openturns.viewer import View

center = [0.0]
constant = [3.0]
linear = ot.Matrix([[2.0]])
f = ot.LinearFunction(center, constant, linear)

graph = f.draw(0.0, 10.0)
graph.setTitle('$y=2x+3$')
View(graph, figure_kwargs={'figsize': (8, 4)}, add_legend=True)
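ot.LinearFunction(center, constant, linear) evaluates to constant + linear * (x - center), so with center 0, constant 3 and slope 2 the title y = 2x + 3 can be checked directly; a quick sketch:

print(f([1.0]))  # [5]  = 2*1 + 3
print(f([4.0]))  # [11] = 2*4 + 3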
Code example #29
# %%
# We see that the diagonal of the covariance matrix is large.
#
# Let us compute a 95% confidence interval for the solution :math:`\theta^\star`.

# %%
print(distributionPosterior.computeBilateralConfidenceIntervalWithMarginalProbability(0.95)[0])

# %%
# The confidence interval is *very* large. In order to clarify the situation, we compute the Jacobian matrix of the model at the candidate point. 

# %%
mycf.setParameter(thetaPrior)
thetaDim = len(thetaPrior)
jacobianMatrix = ot.Matrix(nbobs, thetaDim)
for i in range(nbobs):
    jacobianMatrix[i, :] = mycf.parameterGradient(Qobs[i]).transpose()
print(jacobianMatrix[0:5, :])

# %%
# The rank of the problem can be seen from the singular values of the Jacobian matrix. 

# %%
print(jacobianMatrix.computeSingularValues())

# %%
# Two of the singular values are relatively close to zero.
#
# This explains why the Jacobian matrix is close to being rank-degenerate.
#
Code example #30
print("determinant=%.6f" % determinant)

ev = matrix1.computeEigenValues()
print("ev=" + repr(ev))

if matrix1.isPositiveDefinite():
    isSPD = "true"
else:
    isSPD = "false"
print("isSPD=", isSPD)

matrix2 = matrix1.computeCholesky()
print("matrix2=" + repr(matrix2))

b = ot.Matrix(2, 3)
b[0, 0] = 5.0
b[1, 0] = 0.0
b[0, 1] = 10.0
b[1, 1] = 1.0
b[0, 2] = 15.0
b[1, 2] = 2.0
result2 = matrix1.solveLinearSystem(b, True)
print("result2=" + repr(result2))

matrix3 = ot.CovarianceMatrix(3)
matrix3[1, 0] = float('nan')
try:
    print("ev=", matrix3.computeSingularValues())
except:
    print("ok")