def testTensorProductExperiment1():
    # Generate a tensorized Gauss-Legendre rule in 2 dimensions.
    # The first marginal has 3 nodes.
    # The second marginal has 5 nodes.
    experiment1 = ot.GaussProductExperiment(ot.Uniform(0.0, 1.0), [3])
    experiment2 = ot.GaussProductExperiment(ot.Uniform(0.0, 1.0), [5])
    collection = [experiment1, experiment2]
    multivariateExperiment = ot.TensorProductExperiment(collection)
    nodes, weights = multivariateExperiment.generateWithWeights()
    # Sort
    nodes, weights = sortNodesAndWeights(nodes, weights)
    nodesExact = [
        [0.11270, 0.04691],
        [0.11270, 0.23076],
        [0.11270, 0.5],
        [0.11270, 0.76923],
        [0.11270, 0.95309],
        [0.5, 0.04691],
        [0.5, 0.23076],
        [0.5, 0.5],
        [0.5, 0.76923],
        [0.5, 0.95309],
        [0.88729, 0.04691],
        [0.88729, 0.23076],
        [0.88729, 0.5],
        [0.88729, 0.76923],
        [0.88729, 0.95309],
    ]
    weightsExact = [
        0.0329065,
        0.0664762,
        0.0790123,
        0.0664762,
        0.0329065,
        0.0526504,
        0.106362,
        0.12642,
        0.106362,
        0.0526504,
        0.0329065,
        0.0664762,
        0.0790123,
        0.0664762,
        0.0329065,
    ]
    rtol = 0.0
    atol = 1.e-5
    ott.assert_almost_equal(nodes, nodesExact, rtol, atol)
    ott.assert_almost_equal(weights, weightsExact, rtol, atol)
    #
    size = multivariateExperiment.getSize()
    assert size == 15
    #
    distribution = multivariateExperiment.getDistribution()
    collection = [ot.Uniform(0.0, 1.0), ot.Uniform(0.0, 1.0)]
    expected_distribution = ot.BlockIndependentDistribution(collection)
    assert distribution == expected_distribution
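For reference, the tensorized rule is just the Cartesian product of the marginal rules: each 2-d node pairs one node from each marginal, and its weight is the product of the corresponding marginal weights. A minimal sketch of that reconstruction, reusing the two experiments defined above (node ordering may differ from TensorProductExperiment, so only the structure is checked):

import itertools

# Sketch only: rebuild the tensor rule from the marginal rules.
n1, w1 = experiment1.generateWithWeights()
n2, w2 = experiment2.generateWithWeights()
nodesProduct = [[x[0], y[0]] for x, y in itertools.product(n1, n2)]
weightsProduct = [a * b for a, b in itertools.product(w1, w2)]
assert len(nodesProduct) == 3 * 5  # matches multivariateExperiment.getSize()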
def test_two_inputs_one_output():
    # Kriging use case
    inputDimension = 2

    # Learning data
    levels = [8, 5]
    box = ot.Box(levels)
    inputSample = box.generate()
    # Scale each direction
    inputSample *= 10.0

    model = ot.SymbolicFunction(['x', 'y'], ['cos(0.5*x) + sin(y)'])
    outputSample = model(inputSample)

    # Validation
    sampleSize = 10
    inputValidSample = ot.ComposedDistribution(
        2 * [ot.Uniform(0, 10.0)]).getSample(sampleSize)
    outputValidSample = model(inputValidSample)

    # 2) Definition of the covariance model
    # The parameters below were calibrated beforehand with TNC optimization
    covarianceModel = ot.SquaredExponential([5.33532, 2.61534], [1.61536])

    # 3) Basis definition
    basis = ot.ConstantBasisFactory(inputDimension).build()
    # Kriging algorithm
    algo = ot.KrigingAlgorithm(inputSample, outputSample,
                               covarianceModel, basis)
    algo.run()
    result = algo.getResult()
    # Get meta model
    metaModel = result.getMetaModel()
    outData = metaModel(inputValidSample)

    # 4) Errors
    # Interpolation
    ott.assert_almost_equal(
        outputSample, metaModel(inputSample), 3.0e-5, 3.0e-5)

    # 5) Kriging variance is 0 on learning points
    covariance = result.getConditionalCovariance(inputSample)
    ott.assert_almost_equal(
        covariance, ot.SquareMatrix(len(inputSample)), 7e-7, 7e-7)

    # Covariance per marginal & extract variance component
    coll = result.getConditionalMarginalCovariance(inputSample)
    var = [mat[0, 0] for mat in coll]
    ott.assert_almost_equal(var, [0]*len(var), 1e-14, 1e-14)

    # Variance per marginal
    var = result.getConditionalMarginalVariance(inputSample)
    ott.assert_almost_equal(var, ot.Point(len(inputSample)), 1e-14, 1e-14)
    # Estimation
    ott.assert_almost_equal(
        outputValidSample, metaModel(inputValidSample), 1.e-1, 1e-1)
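The validation step above can also be summarized by a predictivity coefficient. A hedged sketch, assuming OpenTURNS' MetaModelValidation API (constructor and method names vary slightly across versions):

# Sketch: Q2 predictivity of the metamodel on the validation sample.
val = ot.MetaModelValidation(inputValidSample, outputValidSample, metaModel)
q2 = val.computePredictivityFactor()  # close to 1 for an accurate metamodel
print("Q2 =", q2)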
Example 3
 def test_computeLogPDF(self):
     """Test InverseWishart.computeLogPDF"""
     dimension, DoF = self.dimension, self.DoF
     Scale, determinant = self.Scale, self.determinant
     inverse_wishart = ot.InverseWishart(Scale, DoF)
     logPDFatX = - self.logmultigamma(dimension, 0.5 * DoF) \
         - 0.5 * (DoF * dimension * log(2.) + dimension
                  + (dimension + 1) * log(determinant))
     assert_almost_equal(inverse_wishart.computeLogPDF(Scale), logPDFatX)
Example 5
def check_get_value():
    with open('result.txt', 'w') as f:
        f.write('name,X0,X1,X2,X3,X4,X5\n')
        f.write('val,0.125,1.1,2.3,3.5,4.7,5.99\n')
    ott.assert_almost_equal(
        ct.get_value('result.txt', skip_line=1, skip_col=3, col_sep=','), 2.3)
    ott.assert_almost_equal(
        ct.get_value('result.txt', skip_line=1, skip_col=-1, col_sep=','),
        5.99)
    os.remove('result.txt')
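The behaviour assumed of ct.get_value above (skip skip_line lines, split on col_sep, take field skip_col, with negative indices counting from the end) can be mimicked in plain Python. get_value_py below is a hypothetical stand-in for illustration, not the coupling_tools API:

def get_value_py(filename, skip_line=0, skip_col=0, col_sep=','):
    # Hypothetical equivalent of ct.get_value, for illustration only.
    with open(filename) as f:
        line = f.readlines()[skip_line]
    return float(line.strip().split(col_sep)[skip_col])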
def test_stationary_fun():
    # fix https://github.com/openturns/openturns/issues/1861
    ot.RandomGenerator.SetSeed(0)
    rho = ot.SymbolicFunction("tau", "exp(-abs(tau))*cos(2*pi_*abs(tau))")
    model = ot.StationaryFunctionalCovarianceModel([1], [1], rho)
    x = ot.Normal().getSample(20)
    y = x + ot.Normal(0, 0.1).getSample(20)

    algo = ot.KrigingAlgorithm(x, y, model, ot.LinearBasisFactory().build())
    algo.run()
    result = algo.getResult()
    variance = result.getConditionalMarginalVariance(x)
    ott.assert_almost_equal(variance, ot.Sample(len(x), 1), 1e-16, 1e-16)
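With unit scale and amplitude, the covariance of the model above reduces to the correlation function itself, C(s, t) = rho(s - t). A minimal sketch of that identity:

# Sketch: at lag tau the model coincides with rho(tau) for unit scale/amplitude.
tau = 0.5
ott.assert_almost_equal(model.computeAsScalar([0.0], [tau]),
                        rho([tau])[0], 1e-12, 1e-12)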
Example 7
 def test_getSample_getMean(self):
     """Test InverseWishart.getSample and InverseWishart.getMean"""
     d, Scale, DoF, N = self.dimension, self.Scale, self.DoF, int(1E+4)
     Identity = ot.CovarianceMatrix(d)
     Scale_wishart = ot.CovarianceMatrix(Scale.solveLinearSystem(Identity))
     inverse_wishart = ot.InverseWishart(Scale, DoF)
     sample_inverse = ot.Sample(N, (d * (d + 1)) // 2)
     sample = ot.Sample(N, (d * (d + 1)) // 2)
     for i in range(N):
         M_inverse = inverse_wishart.getRealizationAsMatrix()
         M = M_inverse.solveLinearSystem(Identity)
         indice = 0
         for j in range(d):
             for k in range(j + 1):
                 sample_inverse[i, indice] = M_inverse[k, j]
                 sample[i, indice] = M[k, j]
                 indice += 1
     mean_inverse = sample_inverse.computeMean()
     mean = sample.computeMean()
     theoretical_mean_inverse = inverse_wishart.getMean()
     theoretical_mean = (ot.Wishart(Scale_wishart, DoF)).getMean()
     indice, coefficient = 0, 1. / (DoF - d - 1)
     for j in range(d):
         for k in range(j + 1):
             assert_almost_equal(theoretical_mean_inverse[indice],
                                 coefficient * Scale[k, j])
             assert_almost_equal(theoretical_mean[indice],
                                 DoF * Scale_wishart[k, j])
             assert_almost_equal(mean_inverse[indice],
                                 coefficient * Scale[k, j], 0.15, 1.E-3)
             assert_almost_equal(mean[indice],
                                 DoF * Scale_wishart[k, j], 0.15, 1.E-3)
             indice += 1
def test_two_outputs():
    f = ot.SymbolicFunction(['x'], ['x * sin(x)', 'x * cos(x)'])
    sampleX = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]
    sampleY = f(sampleX)
    basis = ot.Basis([ot.SymbolicFunction(['x'], ['x']),
                      ot.SymbolicFunction(['x'], ['x^2'])])
    covarianceModel = ot.SquaredExponential([1.0])
    covarianceModel.setActiveParameter([])
    algo = ot.KrigingAlgorithm(sampleX, sampleY, covarianceModel, basis)
    algo.run()
    result = algo.getResult()
    mm = result.getMetaModel()
    assert mm.getOutputDimension() == 2, "wrong output dim"
    ott.assert_almost_equal(mm(sampleX), sampleY)
 def test_getSample_getMean(self):
     """Test InverseWishart.getSample and InverseWishart.getMean"""
     d, Scale, DoF, N = self.dimension, self.Scale, self.DoF, int(1E+5)
     Identity = ot.CovarianceMatrix(d)
     Scale_wishart = ot.CovarianceMatrix(Scale.solveLinearSystem(Identity))
     inverse_wishart = ot.InverseWishart(Scale, DoF)
     sample_inverse = ot.Sample(N, (d*(d+1))//2)
     sample = ot.Sample(N, (d*(d+1))//2)
     for i in range(N):
         M_inverse = inverse_wishart.getRealizationAsMatrix()
         M = M_inverse.solveLinearSystem(Identity)
         indice = 0
         for j in range(d):
             for k in range(j+1):
                 sample_inverse[i, indice] = M_inverse[k, j]
                 sample[i, indice] = M[k, j]
                 indice += 1
     mean_inverse = sample_inverse.computeMean()
     mean = sample.computeMean()
     theoretical_mean_inverse = inverse_wishart.getMean()
     theoretical_mean = (ot.Wishart(Scale_wishart, DoF)).getMean()
     indice, coefficient = 0, 1./(DoF - d - 1)
     for j in range(d):
         for k in range(j+1):
             assert_almost_equal(theoretical_mean_inverse[indice],
                                 coefficient*Scale[k, j])
             assert_almost_equal(theoretical_mean[indice],
                                 DoF*Scale_wishart[k, j])
             assert_almost_equal(mean_inverse[indice],
                                 coefficient*Scale[k, j], 0.1, 1.E-3)
             assert_almost_equal(mean[indice],
                                 DoF*Scale_wishart[k, j], 0.1, 1.E-3)
             indice += 1
 def test_computeLogPDF_diagonal_case(self):
     """Test InverseWishart.computeLogPDF in the case of diagonal matrices"""
     dimension, DoF = self.dimension, self.DoF
     k = 0.5*(DoF + dimension - 1)
     diagX = ot.Uniform(0.5, 1.).getSample(dimension)
     Scale = ot.CovarianceMatrix(dimension)
     X = ot.CovarianceMatrix(dimension)
     for d in range(dimension):
         Scale[d, d], X[d, d] = self.Scale[d, d], diagX[d, 0]
     inverse_wishart = ot.InverseWishart(Scale, DoF)
     logdensity = inverse_wishart.computeLogPDF(X)
     logratio = - self.logmultigamma(dimension, 0.5*DoF) \
              + dimension*ot.SpecFunc_LnGamma(0.5*(DoF+dimension-1))
     for d in range(dimension):
         inverse_gamma = ot.InverseGamma(k, 2./Scale[d, d])
         logdensity = logdensity - inverse_gamma.computeLogPDF(diagX[d, 0])
         logratio = logratio + 0.5*(1-dimension)*log(0.5*Scale[d, d])
     assert_almost_equal(logdensity, logratio)
Example 11
def test_scalar_model(myModel, x1=None, x2=None):
    if x1 is None and x2 is None:
        x1 = 2.0
        x2 = -3.0
    # Check that computeAsScalar(Scalar) == computeAsScalar(Point)
    ott.assert_almost_equal(myModel.computeAsScalar([x1], [x2]),
                            myModel.computeAsScalar(x1, x2), 1.0e-14, 1.0e-14)

    # Gradient testing
    eps = 1e-5

    grad = myModel.partialGradient([x1], [x2])[0, 0]

    x1_g = x1 + eps
    x1_d = x1 - eps
    gradfd = (myModel.computeAsScalar(x1_g, x2) -
              myModel.computeAsScalar(x1_d, x2)) / (2.0 * eps)
    ott.assert_almost_equal(gradfd, grad, 1e-5, 1e-5)
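The helper above works for any 1-d covariance model exposing computeAsScalar and partialGradient, for instance:

# Example usage of the helper on standard 1-d covariance models.
test_scalar_model(ot.SquaredExponential([2.0]))
test_scalar_model(ot.AbsoluteExponential([1.5]))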
Example 12
 def test_computeLogPDF_diagonal_case(self):
     """Test InverseWishart.computeLogPDF in the case of diagonal matrices"""
     dimension, DoF = self.dimension, self.DoF
     k = 0.5 * (DoF + dimension - 1)
     diagX = ot.Uniform(0.5, 1.).getSample(dimension)
     Scale = ot.CovarianceMatrix(dimension)
     X = ot.CovarianceMatrix(dimension)
     for d in range(dimension):
         Scale[d, d], X[d, d] = self.Scale[d, d], diagX[d, 0]
     inverse_wishart = ot.InverseWishart(Scale, DoF)
     logdensity = inverse_wishart.computeLogPDF(X)
     logratio = - self.logmultigamma(dimension, 0.5 * DoF) \
         + dimension * ot.SpecFunc_LnGamma(0.5 * (DoF + dimension - 1))
     for d in range(dimension):
         inverse_gamma = ot.InverseGamma(k, 2. / Scale[d, d])
         logdensity = logdensity - inverse_gamma.computeLogPDF(diagX[d, 0])
         logratio = logratio + 0.5 * \
             (1 - dimension) * log(0.5 * Scale[d, d])
     assert_almost_equal(logdensity, logratio)
Example 13
def test_solver(solver):
    relEps = solver.getRelativeError()
    absEps = solver.getAbsoluteError()

    def test_f1(x):
        y = exp(x[0]) - 1.9151695967140057e-174
        return [y]

    f1 = ot.PythonFunction(1, 1, test_f1)
    root = solver.solve(f1, 0.0, -450.0, -350.0)
    ott.assert_almost_equal(root, -400.0, relEps, absEps)

    def test_f2(x):
        y = exp(x[0]) - 5.221469689764144e+173
        return [y]

    f2 = ot.PythonFunction(1, 1, test_f2)
    root = solver.solve(f2, 0.0, 350.0, 450.0)
    ott.assert_almost_equal(root, 400.0, relEps, absEps)
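Any bracketing 1-d solver can be passed to the helper, for instance:

# Example usage with standard bracketing solvers.
test_solver(ot.Bisection())
test_solver(ot.Brent())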
Example 14
 def test_ZeroMean(self):
     # Create the KL result
     numberOfVertices = 10
     interval = ot.Interval(-1.0, 1.0)
     mesh = ot.IntervalMesher([numberOfVertices - 1]).build(interval)
     covariance = ot.SquaredExponential()
     process = ot.GaussianProcess(covariance, mesh)
     sampleSize = 10
     processSample = process.getSample(sampleSize)
     threshold = 0.0
     algo = ot.KarhunenLoeveSVDAlgorithm(processSample, threshold)
     algo.run()
     klresult = algo.getResult()
     # Create the KL reduction
     meanField = processSample.computeMean()
     klreduce = ot.KarhunenLoeveReduction(klresult)
     # Generate a trajectory and reduce it
     field = process.getRealization()
     values = field.getValues()
     reducedValues = klreduce(values)
     ott.assert_almost_equal(values, reducedValues)
Example 15
def test_two_outputs():
    f = ot.SymbolicFunction(['x'], ['x * sin(x)', 'x * cos(x)'])
    sampleX = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]
    sampleY = f(sampleX)
    basis = ot.Basis([
        ot.SymbolicFunction(['x'], ['x']),
        ot.SymbolicFunction(['x'], ['x^2'])
    ])
    covarianceModel = ot.SquaredExponential([1.0])
    covarianceModel.setActiveParameter([])
    algo = ot.KrigingAlgorithm(sampleX, sampleY, covarianceModel, basis)
    algo.run()
    result = algo.getResult()
    mm = result.getMetaModel()
    assert mm.getOutputDimension() == 2, "wrong output dim"
    ott.assert_almost_equal(mm(sampleX), sampleY)
    # Check the conditional covariance
    reference_covariance = ot.Matrix([[4.4527, 0.0, 8.34404, 0.0],
                                      [0.0, 2.8883, 0.0, 5.41246],
                                      [8.34404, 0.0, 15.7824, 0.0],
                                      [0.0, 5.41246, 0.0, 10.2375]])
    ott.assert_almost_equal(
        result([[9.5], [10.0]]).getCovariance() - reference_covariance,
        ot.Matrix(4, 4), 0.0, 2e-2)
 def test_computeLogPDF_1D_case(self):
     """Test InverseWishart.computeLogPDF in the one-dimensional case"""
     k, beta = self.k, self.beta
     def logPDF(x):
         if x <= 0.:
             raise ValueError("math domain error")
         return k*log(beta) - ot.SpecFunc_LogGamma(k) - (k+1)*log(x) - beta/x
     data = ((self.inverse_gamma.drawPDF()).getDrawable(0)).getData()
     i = 0
     while data[i, 0] <= 0.:
         i += 1
     for d in data[i:, 0]:
         x = d[0]
         logPDFx = logPDF(x)
         logPDFx_IW = self.one_dimensional_inverse_wishart.computeLogPDF(x)
         logPDFx_IG = self.inverse_gamma.computeLogPDF(x)
         assert_almost_equal(logPDFx_IW, logPDFx)
         assert_almost_equal(logPDFx_IG, logPDFx)
         assert_almost_equal(logPDFx_IW, logPDFx_IG)
Example 17
    def test_computeLogPDF_1D_case(self):
        """Test InverseWishart.computeLogPDF in the one-dimensional case"""
        k, beta = self.k, self.beta

        def logPDF(x):
            if x <= 0.:
                raise ValueError("math domain error")
            return k * log(beta) - ot.SpecFunc_LogGamma(k) - (k + 1) * log(x) - beta / x
        data = ((self.inverse_gamma.drawPDF()).getDrawable(0)).getData()
        i = 0
        while data[i, 0] <= 0.:
            i += 1
        for d in data[i:, 0]:
            x = d[0]
            logPDFx = logPDF(x)
            logPDFx_IW = self.one_dimensional_inverse_wishart.computeLogPDF(x)
            logPDFx_IG = self.inverse_gamma.computeLogPDF(x)
            assert_almost_equal(logPDFx_IW, logPDFx)
            assert_almost_equal(logPDFx_IG, logPDFx)
            assert_almost_equal(logPDFx_IW, logPDFx_IG)
Example 18
def test_one_input_one_output():
    sampleSize = 6
    dimension = 1

    f = ot.SymbolicFunction(['x0'], ['x0 * sin(x0)'])

    X = ot.Sample(sampleSize, dimension)
    X2 = ot.Sample(sampleSize, dimension)
    for i in range(sampleSize):
        X[i, 0] = 3.0 + i
        X2[i, 0] = 2.5 + i
    X[0, 0] = 1.0
    X[1, 0] = 3.0
    X2[0, 0] = 2.0
    X2[1, 0] = 4.0
    Y = f(X)
    Y2 = f(X2)

    # create algorithm
    basis = ot.ConstantBasisFactory(dimension).build()
    covarianceModel = ot.SquaredExponential([1e-02], [4.50736])

    algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis)
    algo.run()

    # perform an evaluation
    result = algo.getResult()

    ott.assert_almost_equal(result.getMetaModel()(X), Y)
    ott.assert_almost_equal(result.getResiduals(), [1.32804e-07], 1e-3, 1e-3)
    ott.assert_almost_equal(result.getRelativeErrors(), [5.20873e-21])

    # Kriging variance is 0 on learning points
    covariance = result.getConditionalCovariance(X)
    covariancePoint = ot.Point(covariance.getImplementation())
    theoreticalVariance = ot.Point(sampleSize * sampleSize)
    ott.assert_almost_equal(covariance, ot.Matrix(sampleSize, sampleSize),
                            8.95e-7, 8.95e-7)

    # Covariance per marginal & extract variance component
    coll = result.getConditionalMarginalCovariance(X)
    var = [mat[0, 0] for mat in coll]
    ott.assert_almost_equal(var, [0] * sampleSize, 1e-14, 1e-14)

    # Variance per marginal
    var = result.getConditionalMarginalVariance(X)
    ott.assert_almost_equal(var, ot.Point(sampleSize), 1e-14, 1e-14)
def test_parameters_iso():

    scale = []
    amplitude = 1.0
    extraParameter = []

    # model 1
    atom_ex = ot.IsotropicCovarianceModel(ot.MaternModel(), 2)
    atom_ex.setScale([5])
    atom_ex.setAmplitude([1.5])
    scale.append(5)
    amplitude *= 1.5
    extraParameter.append(atom_ex.getKernel().getFullParameter()[-1])

    # model2
    m = ot.MaternModel()
    m.setNu(2.5)
    m.setScale([3])
    m.setAmplitude([3])
    scale.append(3)
    amplitude *= 3
    extraParameter.append(m.getNu())

    # model 3
    atom = ot.IsotropicCovarianceModel(ot.AbsoluteExponential(), 2)
    atom.setScale([2])
    atom.setAmplitude([2.5])
    scale.append(2)
    amplitude *= 2.5

    model = ot.ProductCovarianceModel([atom_ex, m, atom])

    ott.assert_almost_equal(model.getScale(), scale, 1e-16, 1e-16)
    ott.assert_almost_equal(model.getAmplitude(), [amplitude], 1e-16, 1e-16)
    ott.assert_almost_equal(model.getFullParameter(),
                            scale + [amplitude] + extraParameter, 1e-16, 1e-16)

    # active parameter should be scale + amplitude
    ott.assert_almost_equal(model.getActiveParameter(),
                            [0, 1, 2, 3], 1e-16, 1e-16)

    # setting new parameters
    extraParameter = [2.5, 0.5]
    model.setFullParameter([6, 7, 8, 2] + extraParameter)

    ott.assert_almost_equal(model.getCollection()[0].getScale()[0], 6, 1e-16, 1e-16)
    ott.assert_almost_equal(model.getCollection()[1].getScale()[0], 7, 1e-16, 1e-16)
    ott.assert_almost_equal(model.getCollection()[2].getScale()[0], 8, 1e-16, 1e-16)
    ott.assert_almost_equal(model.getAmplitude()[0], 2, 1e-16, 1e-16)
    ott.assert_almost_equal(model.getCollection()[0].getFullParameter()[-1],
                            extraParameter[0], 1e-16, 1e-16)
    ott.assert_almost_equal(model.getCollection()[1].getFullParameter()[-1],
                            extraParameter[1], 1e-16, 1e-16)

    # checking active parameter setting
    model.setActiveParameter([0, 1, 2, 3, 5])
    ott.assert_almost_equal(model.getParameter(),
                            [6, 7, 8, 2, extraParameter[-1]], 1e-16, 1e-16)
Example 20
# algo.setMaximumCoefficientOfVariation(1e-6)
algo.setStandardDeviationCriterionType('MAX')
algo.setCoefficientOfVariationCriterionType('NONE')
# algo.setMaximumStandardDeviation(1.6)
# print(algo.getMaximumStandardDeviation())
# algo.setProgressCallback(progress)
# algo.setStopCallback(stop)

print('algo=', algo)

# Perform the simulation
algo.run()

# Stream out the result
result = algo.getResult()
print('result=', result)

ref_mu = composite.getSample(1000000).computeMean()
ref_var = composite.getSample(1000000).computeVariance()
print('mu=', ref_mu, 'var=', ref_var)
ott.assert_almost_equal(result.getExpectationEstimate(), ref_mu, 1e-2, 1e-5)
#ott.assert_almost_equal(result.getVarianceEstimate(), ref_var, 1e-2, 1e-5)

expectationDistribution = result.getExpectationDistribution()
print(expectationDistribution)

convergenceGraph = algo.drawExpectationConvergence()

#from openturns.viewer import View
# View(convergenceGraph).ShowAll()
for i in range(obsSize):
    for j in range(chainDim):
        P[i, j] = p[i, j]
Qn = P.transpose() * P + Q0
Qn_inv = ot.SquareMatrix(chainDim)
for j in range(chainDim):
    I_j = [0] * chainDim
    I_j[j] = 1.0
    Qn_inv_j = Qn.solveLinearSystem(I_j)
    for i in range(chainDim):
        Qn_inv[i, j] = Qn_inv_j[i]

sigma_exp = [0] * chainDim
for i in range(chainDim):
    sigma_exp[i] = m.sqrt(Qn_inv[i, i])
y_vec = [0] * obsSize
for i in range(obsSize):
    y_vec[i] = y_obs[i, 0]

x_emp = Qn.solveLinearSystem(P.transpose() * y_vec)
mu_exp = Qn.solveLinearSystem((P.transpose() * P) * x_emp +
                              ot.Matrix(Q0) * mu0)

print('sample mean=', x_mu)
print('expected mean=', mu_exp)
ott.assert_almost_equal(x_mu, mu_exp, 1e-1, 1e-3)

print('covariance=', x_cov)
print('expected covariance=', Qn_inv)
ott.assert_almost_equal(x_cov, Qn_inv, 1e-3, 2e-2)
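For reference, the closed-form quantities assembled above are the Gaussian conjugate posterior, written here in the notation of the code:

Q_n = P^T P + Q_0, \qquad \hat{x} = Q_n^{-1} P^T y, \qquad
\mu_{\exp} = Q_n^{-1}\left(P^T P\,\hat{x} + Q_0\,\mu_0\right), \qquad
\Sigma = Q_n^{-1},

so the MCMC sample mean x_mu and covariance x_cov are checked against \mu_{\exp} and Q_n^{-1}, with marginal standard deviations \sigma_i = \sqrt{(Q_n^{-1})_{ii}}.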
Example 22
for method in methods:
    print("method=", method)
    # 1. Check with local error covariance
    print("Local error covariance")
    algo = ot.GaussianLinearCalibration(modelX, x, y, candidate,
                                        priorCovariance, errorCovariance,
                                        method)
    algo.run()
    calibrationResult = algo.getResult()

    # Analysis of the results
    # Maximum A Posteriori estimator
    thetaMAP = calibrationResult.getParameterMAP()
    exactTheta = ot.Point([5.69186, 0.0832132, 0.992301])
    rtol = 1.e-2
    assert_almost_equal(thetaMAP, exactTheta, rtol)

    # Covariance matrix of theta
    thetaPosterior = calibrationResult.getParameterPosterior()
    covarianceThetaStar = matrixToSample(thetaPosterior.getCovariance())
    exactCovarianceTheta = ot.Sample(
        [[0.308302, -0.000665387, 6.81135e-05],
         [-0.000665387, 8.36243e-06, -8.86775e-07],
         [6.81135e-05, -8.86775e-07, 9.42234e-08]])
    assert_almost_equal(covarianceThetaStar, exactCovarianceTheta)

    # Check other fields
    print("result=", calibrationResult)

    # 2. Check with global error covariance
    print("Global error covariance")
# Calibration of default optimizer
ot.ResourceMap.SetAsScalar(
    'GeneralLinearModelAlgorithm-DefaultOptimizationLowerBound', 1.0e-5)
ot.ResourceMap.SetAsScalar(
    'GeneralLinearModelAlgorithm-DefaultOptimizationUpperBound', 100)
# Data & estimation
inputDimension = 1
X = ot.Normal().getSample(100)
X = X.sortAccordingToAComponent(0)
covarianceModel = ot.SquaredExponential([1.0], [1.0])
model = ot.SymbolicFunction(["x"], ["x - 0.6 * cos(x/3)"])
Y = model(X)
basis = ot.QuadraticBasisFactory(inputDimension).build()
algo = ot.GeneralLinearModelAlgorithm(X, Y, covarianceModel, basis, True)
algo.setOptimizationAlgorithm(ot.NLopt('LN_NELDERMEAD'))
algo.run()

# perform an evaluation
result = algo.getResult()
metaModel = result.getMetaModel()
conditionalCovariance = result.getCovarianceModel()
residual = metaModel(X) - Y
ott.assert_almost_equal(residual.computeCenteredMoment(2),
                        [1.06e-05], 1e-5, 1e-5)
ott.assert_almost_equal(conditionalCovariance.getParameter(),
                        [0.619144, 0.000937], 5e-3, 1e-3)
likelihood = algo.getObjectiveFunction()
assert likelihood.getInputDimension() == 1, "likelihood dim"
print("ok")
def test_one_input_one_output():
    sampleSize = 6
    dimension = 1

    f = ot.SymbolicFunction(['x0'], ['x0 * sin(x0)'])

    X = ot.Sample(sampleSize, dimension)
    X2 = ot.Sample(sampleSize, dimension)
    for i in range(sampleSize):
        X[i, 0] = 3.0 + i
        X2[i, 0] = 2.5 + i
    X[0, 0] = 1.0
    X[1, 0] = 3.0
    X2[0, 0] = 2.0
    X2[1, 0] = 4.0
    Y = f(X)
    Y2 = f(X2)

    # create covariance model
    basis = ot.ConstantBasisFactory(dimension).build()
    covarianceModel = ot.SquaredExponential()

    # create algorithm
    algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis)

    # set sensible optimization bounds and estimate hyperparameters
    algo.setOptimizationBounds(ot.Interval(X.getMin(), X.getMax()))
    algo.run()

    # perform an evaluation
    result = algo.getResult()

    ott.assert_almost_equal(result.getMetaModel()(X), Y)
    ott.assert_almost_equal(result.getResiduals(), [1.32804e-07], 1e-3, 1e-3)
    ott.assert_almost_equal(result.getRelativeErrors(), [5.20873e-21])

    # Kriging variance is 0 on learning points
    covariance = result.getConditionalCovariance(X)
    nullMatrix = ot.Matrix(sampleSize, sampleSize)
    ott.assert_almost_equal(covariance, nullMatrix, 0.0, 1e-13)

    # Kriging variance is non-null on validation points
    validCovariance = result.getConditionalCovariance(X2)
    values = ot.Matrix(
        [[0.81942182, -0.35599947, -0.17488593, 0.04622401, -0.03143555, 0.04054783],
         [-0.35599947, 0.20874735, 0.10943841, -0.03236419, 0.02397483, -0.03269184],
         [-0.17488593, 0.10943841, 0.05832917, -0.01779918, 0.01355719, -0.01891618],
         [0.04622401, -0.03236419, -0.01779918, 0.00578327, -0.00467674, 0.00688697],
         [-0.03143555, 0.02397483, 0.01355719, -0.00467674, 0.0040267, -0.00631173],
         [0.04054783, -0.03269184, -0.01891618, 0.00688697, -0.00631173, 0.01059488]])
    ott.assert_almost_equal(validCovariance - values, nullMatrix, 0.0, 1e-7)

    # Covariance per marginal & extract variance component
    coll = result.getConditionalMarginalCovariance(X)
    var = [mat[0, 0] for mat in coll]
    ott.assert_almost_equal(var, [0] * sampleSize, 1e-14, 1e-13)

    # Variance per marginal
    var = result.getConditionalMarginalVariance(X)
    ott.assert_almost_equal(var, ot.Sample(sampleSize, 1), 1e-14, 1e-13)

    # Prediction accuracy
    ott.assert_almost_equal(Y2, result.getMetaModel()(X2), 0.3, 0.0)
    X[1, 0] = 3.0
    X2[0, 0] = 2.0
    X2[1, 0] = 4.0
    Y = model(X)
    # Data validation
    Y2 = model(X2)
    for i in range(sampleSize):
        # Add a small noise to data
        Y[i, 0] += 0.01 * ot.DistFunc.rNormal()

    basis = ot.LinearBasisFactory(spatialDimension).build()
    covarianceModel = ot.DiracCovarianceModel(spatialDimension)
    algo = ot.GeneralizedLinearModelAlgorithm(X, Y, covarianceModel, basis)
    algo.run()

    # perform an evaluation
    result = algo.getResult()
    metaModel = result.getMetaModel()
    conditionalCovariance = result.getCovarianceModel()
    residual = metaModel(X) - Y
    assert_almost_equal(residual.computeCenteredMoment(2),
                        [0.00013144], 1e-5, 1e-5)
    assert_almost_equal(conditionalCovariance.getParameter(),
                        [0.011464782674211804], 1e-5, 1e-3)
    print("Test Ok")

except:
    import sys
    print("t_GeneralizedLinearModelAlgorithm_std_hmat.py",
          sys.exc_info()[0], sys.exc_info()[1])
Example 26
from __future__ import print_function
import openturns as ot
from openturns.testing import assert_almost_equal
from openturns.usecases import stressed_beam as stressed_beam
from math import pi


ot.TESTPREAMBLE()
ot.PlatformInfo.SetNumericalPrecision(5)

"""
Test the import of the AxialStressedBeam data class.
"""
sb = stressed_beam.AxialStressedBeam()


# test parameters
assert_almost_equal(sb.D, 0.02, 1e-12)
assert_almost_equal(sb.muR, 3.0e6, 1e-12)
assert_almost_equal(sb.sigmaR, 3.0e5, 1e-12)
assert_almost_equal(sb.muF, 750.0, 1e-12)
assert_almost_equal(sb.sigmaF, 50.0, 1e-12)

# test marginals means
assert_almost_equal(sb.distribution_R.getMean()[0], 3.0e6, 1e-12)
assert_almost_equal(sb.distribution_F.getMean()[0], 750.0, 1e-12)

# special value of the model function
X = ot.Point([1.0, pi/10000.0])
assert_almost_equal(sb.model(X), [0.0], 1e-12)
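That special value follows from the limit state of the axial stressed beam, assuming the usual form

g(R, F) = R - \frac{F}{\pi D^2 / 4}:

with D = 0.02 the section area is \pi D^2 / 4 = \pi \cdot 10^{-4}, so X = (1, \pi / 10^4) gives g = 1 - 1 = 0.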
    X2[0, 0] = 2.0
    X2[1, 0] = 4.0
    Y = model(X)
    # Data validation
    Y2 = model(X2)
    for i in range(sampleSize):
        # Add a small noise to data
        Y[i, 0] += 0.01 * ot.DistFunc.rNormal()

    basis = ot.LinearBasisFactory(spatialDimension).build()
    covarianceModel = ot.DiracCovarianceModel(spatialDimension)
    algo = ot.GeneralizedLinearModelAlgorithm(X, Y, covarianceModel, basis)
    algo.run()

    # perform an evaluation
    result = algo.getResult()
    metaModel = result.getMetaModel()
    conditionalCovariance = result.getCovarianceModel()
    residual = metaModel(X) - Y
    assert_almost_equal(residual.computeCenteredMoment(2), [0.00013144], 1e-5,
                        1e-5)
    assert_almost_equal(conditionalCovariance.getParameter(),
                        [0.011464782674211804], 1e-5, 1e-3)
    print("Test Ok")

except:
    import sys
    print("t_GeneralizedLinearModelAlgorithm_std_hmat.py",
          sys.exc_info()[0],
          sys.exc_info()[1])
def py_f(X):
    return X


# Check that memoization propagates through the finite-difference gradients.
# Here we use a PythonFunction, as its gradient/hessian are based on finite
# differences by default.
ot_f = ot.MemoizeFunction(ot.PythonFunction(3, 3, py_f))
x = [1.0, 2.0, 3.0]
n_calls_0 = ot_f.getCallsNumber()
res_f = ot_f(x)
res_grad = ot_f.gradient(x)
res_hess = ot_f.hessian(x)
n_calls_1 = ot_f.getCallsNumber()
# 25 calls = 1 (evaluation) + 6 (centered FD gradient, 2 points per input)
# + 18 (centered FD Hessian)
assert_almost_equal(n_calls_1 - n_calls_0, 25, 0.0, 0.0)
# Do the computation once again
n_calls_0 = n_calls_1
res_f = ot_f(x)
res_grad = ot_f.gradient(x)
res_hess = ot_f.hessian(x)
n_calls_1 = ot_f.getCallsNumber()
# 0 = everything is reused from the cache
assert_almost_equal(n_calls_1 - n_calls_0, 0, 0.0, 0.0)
# Now, switch to noncentered gradients to reduce the calls to the minimum
eps = 1e-8
gr_f = ot.NonCenteredFiniteDifferenceGradient(eps, ot_f.getEvaluation())
ot_f.setGradient(gr_f)
x = [3, 1, 2]
n_calls_0 = n_calls_1
res_f = ot_f(x)
ot.TESTPREAMBLE()

f = ot.SymbolicFunction(["x"], ["sin(x)"])
a = -2.5
b = 4.5
# Integrate sin(t) between a & b --> cos(a) - cos(b)
ref = math.cos(a) - math.cos(b)

all_methods = [ot.FejerAlgorithm.FEJERTYPE1,
               ot.FejerAlgorithm.FEJERTYPE2, ot.FejerAlgorithm.CLENSHAWCURTIS]
# 1D checking
for method in all_methods:
    algo = ot.FejerAlgorithm([100], method)
    value, adaptedNodes = algo.integrateWithNodes(f, ot.Interval(a, b))
    ott.assert_almost_equal(value[0], ref, 1e-10, 1e-10)

g = ot.SymbolicFunction(["x", "y"], ["cos(pi_ * x / 2) * sin(pi_ * y)"])
ref = 8 / (math.pi * math.pi)
interval = ot.Interval([-1, 0], [1, 1])
for method in all_methods:
    algo = ot.FejerAlgorithm([64, 64], method)
    value, adaptedNodes = algo.integrateWithNodes(g, interval)
    ott.assert_almost_equal(value[0], ref, 1e-10, 1e-10)
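The 2-d reference value follows from the separability of the integrand:

\int_{-1}^{1} \cos(\pi x / 2)\, dx = \frac{4}{\pi}, \qquad
\int_{0}^{1} \sin(\pi y)\, dy = \frac{2}{\pi}, \qquad
\text{hence}\quad \mathrm{ref} = \frac{8}{\pi^2}.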

# Now we use the same calculus using variables changes
h = ot.SymbolicFunction(
    ["x", "y"], ["cos(pi_ * x / 2) * sin(pi_ * y / 2 + pi_/2 ) / 2"])
interval = ot.Interval([-1, -1], [1, 1])
for method in all_methods:
    algo = ot.FejerAlgorithm([64, 64], method)
    value, adaptedNodes = algo.integrateWithNodes(h, interval)
    ott.assert_almost_equal(value[0], ref, 1e-10, 1e-10)
Example 30
    X[0, 0] = 1.0
    X[1, 0] = 3.0
    X2[0, 0] = 2.0
    X2[1, 0] = 4.0
    Y = model(X)
    # Data validation
    Y2 = model(X2)
    for i in range(sampleSize):
        # Add a small noise to data
        Y[i, 0] += 0.01 * ot.DistFunc.rNormal()

    basis = ot.LinearBasisFactory(inputDimension).build()
    covarianceModel = ot.DiracCovarianceModel(inputDimension)
    algo = ot.GeneralLinearModelAlgorithm(X, Y, covarianceModel, basis)
    algo.run()

    # perform an evaluation
    result = algo.getResult()
    metaModel = result.getMetaModel()
    conditionalCovariance = result.getCovarianceModel()
    residual = metaModel(X) - Y
    assert_almost_equal(residual.computeCenteredMoment(2), [0.00013144], 1e-5,
                        1e-5)
    print("Test Ok")

except:
    import sys
    print("t_GeneralLinearModelAlgorithm_std_hmat.py",
          sys.exc_info()[0],
          sys.exc_info()[1])
Example 31
Cov2.setScale(Y.computeStandardDeviation())

# This is the GSA-type estimator: weight is 1.
W = ot.SquareMatrix(size)
for i in range(size):
    W[i, i] = 1.0

# Using a biased estimator
estimatorTypeV = ot.HSICVStat()

# Loop over marginals
hsicIndexRef = [0.02331323, 0.00205350, 0.00791711]
for i in range(3):
    test = X.getMarginal(i)
    # Set input covariance scale
    Cov1.setScale(test.computeStandardDeviation())
    hsicIndex = estimatorTypeV.computeHSICIndex(test, Y, Cov1, Cov2, W)
    ott.assert_almost_equal(hsicIndex, hsicIndexRef[i])

# Using an unbiased estimator
estimatorTypeU = ot.HSICUStat()

# Loop over marginals
hsicIndexRef = [0.02228377, 0.00025668, 0.00599247]
for i in range(3):
    test = X.getMarginal(i)
    # Set input covariance scale
    Cov1.setScale(test.computeStandardDeviation())
    hsicIndex = estimatorTypeU.computeHSICIndex(test, Y, Cov1, Cov2, W)
    ott.assert_almost_equal(hsicIndex, hsicIndexRef[i])
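With W the identity, the statistic computed above is (up to the implementation's normalization) the classical biased HSIC estimator: with centering matrix H = I - \frac{1}{n} \mathbf{1}\mathbf{1}^T and Gram matrices K_x, K_y built from the input and output kernels,

\widehat{\mathrm{HSIC}}_V \propto \frac{1}{n^2} \operatorname{tr}\left(K_x H K_y H\right).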
# Define OptimizationProblem
problem = ot.OptimizationProblem(objectiveFun)
bounds = ot.Interval([0., 0., 0], [1., 1., 4])
varTypes = [ot.OptimizationProblemImplementation.INTEGER,
            ot.OptimizationProblemImplementation.CONTINUOUS,
            ot.OptimizationProblemImplementation.CONTINUOUS]
problem.setBounds(bounds)
problem.setVariablesType(varTypes)
problem.setMinimization(True)

# Define OptimizationAlgorithm
x0 = [0]*3
algo = ot.Bonmin(problem, "B-BB")
algo.setStartingPoint(x0)
algo.setMaximumEvaluationNumber(10000)
algo.setMaximumIterationNumber(1000)
#ot.ResourceMap.AddAsScalar('Bonmin-bonmin.time_limit', 60)
algo.run()

# Retrieve result
result = algo.getResult()
x_star = result.getOptimalPoint()
print("x*=", x_star)
y_star = result.getOptimalValue()
neval = result.getEvaluationNumber()
print("f(x*)=", y_star, "neval=", neval)


# ASSERTIONS
ott.assert_almost_equal(x_star, [1.0, 0.0, 0.25], 1, 5e-4)
Example 33
    for minimization in [True, False]:
        if algoName == 'NONLINEAR_CONJUGATE_GRADIENT' and not minimization:
            # goes very far and the function cannot evaluate
            continue
        print('algoName=', algoName, 'minimization=', minimization)
        problem = ot.OptimizationProblem(f)
        problem.setMinimization(minimization)
        algo = ot.Ceres(problem, algoName)
        algo.setStartingPoint(startingPoint)
        # algo.setProgressCallback(progress)
        # algo.setStopCallback(stop)
        algo.run()
        result = algo.getResult()
        x_star = result.getOptimalPoint()
        if minimization and algoName != 'STEEPEST_DESCENT':
            ott.assert_almost_equal(x_star, p_ref, 5e-2)
        print(result)

# least-squares optimization
n = 3
m = 10

x = [[0.5 + i] for i in range(m)]

model = ot.SymbolicFunction(['a', 'b', 'c', 'x'],
                            ['a + b * exp(min(500, c * x))'])
p_ref = [2.8, 1.2, 0.5]  # a, b, c
modelx = ot.ParametricFunction(model, [0, 1, 2], p_ref)
y = modelx(x)

Example 34
    def computePDF(self, x):
        u = x[0]
        if u < -1 or u > 1:
            y = 0.0
        else:
            y = self.c * (1 - u**2)**2
        return y

    def getRange(self):
        return ot.Interval(-1.0, 1.0)

# Using some reference values
# See https://en.wikipedia.org/wiki/Kernel_(statistics)#Kernel_functions_in_common_use
# First Normal dist with default ctor
distribution = ot.Normal()
ott.assert_almost_equal(distribution.getRoughness(),
                        0.5 / m.sqrt(m.pi))
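For the record, the roughness of a density p is \int p(x)^2\, dx. For a normal distribution N(\mu, \sigma) it equals \frac{1}{2\sigma\sqrt{\pi}}, which gives the 0.5/\sqrt{\pi} above in the standard case; for an independent multivariate normal it is the product of the marginal values, which is what the sampling-based checks below approximate.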

# Dimension 2 (Fix https://github.com/openturns/openturns/issues/1485)
# Independent copula: the roughness is the product of the marginal integrals
distribution = ot.Normal(2)
ott.assert_almost_equal(distribution.getRoughness(),
                        compute_roughness_sampling(distribution))

# 2D Normal with scale & correlation
# This allows checking that Normal::getRoughness is well implemented
corr = ot.CorrelationMatrix(2)
corr[1, 0] = 0.3
distribution = ot.Normal([0, 0], [1, 2], corr)
ott.assert_almost_equal(distribution.getRoughness(),
                        compute_roughness_sampling(distribution))
Example 35
problem = ot.OptimizationProblem(objectiveFun)
problem.setInequalityConstraint(constraintFun)
bounds = ot.Interval([0., 0., 0., 0.], [1., 1., 1., 1.])
problem.setBounds(bounds)
problem.setMinimization(True)
problem.setVariablesType([ot.OptimizationProblemImplementation.BINARY,
                          ot.OptimizationProblemImplementation.BINARY,
                          ot.OptimizationProblemImplementation.BINARY,
                          ot.OptimizationProblemImplementation.BINARY])

# Define OptimizationAlgorithm
x0 = [0., 0., 0., 0.]
algo = ot.Bonmin(problem, "B-BB")
algo.setStartingPoint(x0)
algo.setMaximumEvaluationNumber(10000)
algo.setMaximumIterationNumber(1000)
ot.ResourceMap.AddAsScalar('Bonmin-bonmin.time_limit', 60)
algo.run()

# Retrieve result
result = algo.getResult()
x_star = result.getOptimalPoint()
print("x*=", x_star)
y_star = result.getOptimalValue()
neval = result.getEvaluationNumber()
print("f(x*)=", y_star, "neval=", neval)

print("g(x*)=", constraintFun(x_star))


# ASSERTION
ott.assert_almost_equal(x_star, [0, 1, 1, 1], 1, 5e-4)
    print("================")
    print("Test using NLOpt")
    print("================")
    # Calibration of default optimizer
    ot.ResourceMap.SetAsNumericalScalar(
        'GeneralizedLinearModelAlgorithm-DefaultOptimizationLowerBound', 1.0e-5)
    ot.ResourceMap.SetAsNumericalScalar(
        'GeneralizedLinearModelAlgorithm-DefaultOptimizationUpperBound', 100)
    # Data & estimation
    spatialDimension = 1
    X = ot.Normal().getSample(100)
    X = X.sortAccordingToAComponent(0)
    covarianceModel = ot.SquaredExponential([1.0], [1.0])
    model = ot.NumericalMathFunction(["x"], ["x - 0.6 * cos(x/3)"])
    Y = model(X)
    basis = ot.QuadraticBasisFactory(spatialDimension).build()
    algo = ot.GeneralizedLinearModelAlgorithm(X, Y, covarianceModel, basis)
    algo.setOptimizationSolver(ot.NelderMead())
    algo.run()

    # perform an evaluation
    result = algo.getResult()
    metaModel = result.getMetaModel()
    conditionalCovariance = result.getCovarianceModel()
    residual = metaModel(X) - Y
    assert_almost_equal(residual.computeCenteredMoment(2),
                        [1.06e-05], 1e-5, 1e-5)
    assert_almost_equal(conditionalCovariance.getParameter(),
                        [0.702138, 0.00137], 2e-3, 1e-3)
    print("Test Ok")

except:
    import sys
    print("t_GeneralizedLinearModelAlgorithm_nlopt.py", sys.exc_info()[0], sys.exc_info()[1])