#! /usr/bin/env python
from __future__ import print_function
import openturns as ot

dim = 2

# Start from an empty time series of the requested dimension.
ts1 = ot.TimeSeries(0, dim)
ts1.setName('Ts1')

# Append three points; point p holds values 10*(p+1) + i per component.
for p in range(3):
    values = ot.Point(dim)
    for i in range(dim):
        values[i] = 10. * (p + 1) + i
    ts1.add(values)
print('ts1=', ts1)
print('len=', len(ts1))

# Access by index returns one element of the series.
secondElement = ts1[1]
print('second element=', secondElement)

# Replace the third element; a series element carries dim + 1 components
# (timestamp followed by the values), hence Point(dim + 1).
newPoint = ot.Point(dim + 1)
for i in range(dim):
    newPoint[i + 1] = 1000. * (i + 1)
ts1[2] = newPoint
print('ts1=', ts1)
# Element-wise binary operators on Sample objects.
summation = ot.Sample(sample1 + sample2)
subtraction = ot.Sample(sample2 - sample1)
print('sample1 + sample2=', repr(summation))
print('sample2 - sample1=', repr(subtraction))

# Operator +=|-=
sample3 = ot.Sample(sample2)
sample4 = ot.Sample(sample2)
sample3 += sample1
sample4 -= sample1
print('sample3=', repr(sample3))
print('sample4=', repr(sample4))

# Scalar and Point operands broadcast over the sample.
sample5 = ot.Sample(sample2)
m = ot.SquareMatrix([[1, 2], [3, 5]])
v = ot.Point(2, 3.0)
t = ot.Point(2, 5.0)
print('sample5 =', sample5)
print('sample*2:', sample5 * 2.)
print('2*sample:', 2.0 * sample5)
print('sample/2:', sample5 / 2.)
print('sample*v:', sample5 * v)
print('sample/v:', sample5 / v)
# in-place
sample5 += t
print('sample+=t:', sample5)
# Run the calibration with and without bootstrap.
bootstrapSizes = [0, 30]
for bootstrapSize in bootstrapSizes:
    algo = ot.GaussianNonLinearCalibration(
        modelX, x, y, candidate, priorCovariance, errorCovariance)
    algo.setBootstrapSize(bootstrapSize)
    algo.run()
    # To avoid discrepancy between the platforms with or without CMinpack
    print("result (Auto)=", algo.getResult().getParameterMAP())
    algo.setOptimizationAlgorithm(
        ot.MultiStart(
            ot.TNC(),
            ot.LowDiscrepancyExperiment(
                ot.SobolSequence(),
                ot.Normal(
                    candidate,
                    ot.CovarianceMatrix(ot.Point(candidate).getDimension())),
                ot.ResourceMap.GetAsUnsignedInteger(
                    "GaussianNonLinearCalibration-MultiStartSize")).generate())
    )
    algo.run()
    # To avoid discrepancy between the platforms with or without CMinpack
    print("result (TNC)=", algo.getResult().getParameterMAP())
    print("error=", algo.getResult().getObservationsError())
    # Same calibration, this time with a global error covariance.
    algo = ot.GaussianNonLinearCalibration(
        modelX, x, y, candidate, priorCovariance, globalErrorCovariance)
    algo.setBootstrapSize(bootstrapSize)
    algo.run()
    print("result (Global)=", algo.getResult().getParameterMAP())

# unobserved inputs
#
# We load the FMU as a FMUFunction (see the
# :doc:`tutorial<../_generated/otfmi.FMUFunction>`):
import otfmi
import otfmi.example.utility

path_fmu = otfmi.example.utility.get_path_fmu("deviation")
model_fmu = otfmi.FMUFunction(
    path_fmu, inputs_fmu=["E", "F", "L", "I"], outputs_fmu="y")

# %%
# We test the function wrapping the deviation model on a point:
import openturns as ot

point = ot.Point([3e7, 2e4, 255, 350])
model_evaluation = model_fmu(point)
print("Running the FMU: deviation = {}".format(model_evaluation))

# %%
# We define probability laws on the 4 uncertain inputs:
E = ot.Beta(0.93, 3.2, 2.8e7, 4.8e7)
F = ot.LogNormal()
F.setParameter(ot.LogNormalMuSigma()([30.e3, 9e3, 15.e3]))
L = ot.Uniform(250.0, 260.0)
I = ot.Beta(2.5, 4.0, 310.0, 450.0)

# %%
# According to the laws of mechanics, when the length L increases, the moment
# of inertia I decreases.
#! /usr/bin/env python
from __future__ import print_function
import openturns as ot

myFunc = ot.SymbolicFunction(
    ['x1', 'x2'],
    ['x1*sin(x2)', 'cos(x1+x2)', '(x2+1)*exp(x1-2*x2)'])
data = ot.Sample(9, myFunc.getInputDimension())
point = ot.Point(myFunc.getInputDimension())

# Coordinates of the first seven rows of the design; the shared `point`
# buffer is overwritten then copied into the sample at each step.
coordinates = [
    (0.5, 0.5),
    (-0.5, -0.5),
    (-0.5, 0.5),
    (0.5, -0.5),
    (0.5, 0.5),
    (-0.25, -0.25),
    (-0.25, 0.25),
]
for row, (x1, x2) in enumerate(coordinates):
    point[0] = x1
    point[1] = x2
    data[row] = point
point[0] = 0.25
print("Continuous = ", distribution.isContinuous())

# Has this distribution an independent copula ?
print("Has independent copula = ", distribution.hasIndependentCopula())

# Test for realization of distribution
oneRealization = distribution.getRealization()
print("oneRealization=", repr(oneRealization))

# Test for sampling
size = 10
oneSample = distribution.getSample(size)
print("oneSample=Ok", repr(oneSample))

# Evaluate PDF and CDF at a constant point.
point = ot.Point(distribution.getDimension(), 2.0)
pointPDF = distribution.computePDF(point)
pointCDF = distribution.computeCDF(point)
print("point= ", repr(point), " pdf= %.12g" % pointPDF, " cdf=", pointCDF)

# Get 95% quantile
quantile = distribution.computeQuantile(0.95)
print("Quantile=", repr(quantile))
print("entropy=%.6f" % distribution.computeEntropy())
print("Standard representative=", distribution.getStandardRepresentative())
print("parameter=", distribution.getParameter())
print("parameterDescription=", distribution.getParameterDescription())
parameter = distribution.getParameter()
# Exercise every linear-algebra backend of the linear calibration.
methods = ["SVD", "QR", "Cholesky"]
for method in methods:
    print("method=", method)
    # 1. Check with local error covariance
    print("Local error covariance")
    algo = ot.GaussianLinearCalibration(
        modelX, x, y, candidate, priorCovariance, errorCovariance, method)
    algo.run()
    calibrationResult = algo.getResult()
    # Analysis of the results
    # Maximum A Posteriori estimator
    thetaMAP = calibrationResult.getParameterMAP()
    exactTheta = ot.Point([5.69186, 0.0832132, 0.992301])
    rtol = 1.e-2
    assert_almost_equal(thetaMAP, exactTheta, rtol)
    # Covariance matrix of theta
    thetaPosterior = calibrationResult.getParameterPosterior()
    covarianceThetaStar = matrixToSample(thetaPosterior.getCovariance())
    exactCovarianceTheta = ot.Sample(
        [[0.308302, -0.000665387, 6.81135e-05],
         [-0.000665387, 8.36243e-06, -8.86775e-07],
         [6.81135e-05, -8.86775e-07, 9.42234e-08]])
    assert_almost_equal(covarianceThetaStar, exactCovarianceTheta)
    # Check other fields
    print("result=", calibrationResult)
# QQPlot tests
size = 100
normal = ot.Normal(1)
sample = normal.getSample(size)
sample2 = ot.Gamma(3.0, 4.0, 0.0).getSample(size)
graph = ot.VisualTest.DrawQQplot(sample, sample2, 100)
# graph.draw('curve4.png')
view = View(graph)
# view.save('curve4.png')
view.ShowAll(block=True)

# Clouds tests
dimension = 2
R = ot.CorrelationMatrix(dimension)
R[0, 1] = 0.8
distribution = ot.Normal(
    ot.Point(dimension, 3.0), ot.Point(dimension, 2.0), R)
size = 100
sample1 = ot.Normal([3.0] * dimension, [2.0] * dimension, R).getSample(size)
sample2 = ot.Normal([2.0] * dimension, [3.0] * dimension, R).getSample(size // 2)
cloud1 = ot.Cloud(sample1, "blue", "fsquare", "Sample1 Cloud")
cloud2 = ot.Cloud(sample2, "red", "fsquare", "Sample2 Cloud")
graph = ot.Graph("two samples clouds", "x1", "x2", True, "topright")
graph.add(cloud1)
graph.add(cloud2)
# graph.draw('curve5.png')
view = View(graph)
# view.save('curve5.png')
view.show()
import openturns as ot

ot.TESTPREAMBLE()
ot.PlatformInfo.SetNumericalPrecision(3)

# linear
levelFunction = ot.SymbolicFunction(
    ["x1", "x2", "x3", "x4"], ["x1+2*x2-3*x3+4*x4"])
# Add a finite difference gradient to the function
myGradient = ot.NonCenteredFiniteDifferenceGradient(
    1e-7, levelFunction.getEvaluation())
print("myGradient = ", repr(myGradient))
# Substitute the gradient
levelFunction.setGradient(ot.NonCenteredFiniteDifferenceGradient(myGradient))
startingPoint = ot.Point(4, 0.0)
algo = ot.SQP(ot.NearestPointProblem(levelFunction, 3.0))
algo.setStartingPoint(startingPoint)
print('algo=', algo)
algo.run()
result = algo.getResult()
print('result=', result)

# non-linear
levelFunction = ot.SymbolicFunction(
    ["x1", "x2", "x3", "x4"], ["x1*cos(x1)+2*x2*x3-3*x3+4*x3*x4"])
# Add a finite difference gradient to the function,
# needs it
myGradient = ot.NonCenteredFiniteDifferenceGradient(
    1e-7, levelFunction.getEvaluation())
# Substitute the gradient
ot.Log.Show(ot.Log.NONE)

# %%
# We first load the data class from the usecases module :
from openturns.usecases import cantilever_beam as cantilever_beam
cb = cantilever_beam.CantileverBeam()

# %%
# We want to create the random variable of interest Y=g(X) where :math:`g(.)` is the physical model and :math:`X` is the input vectors. For this example we consider independent marginals.

# %%
# We set a `mean` vector and a unitary standard deviation :
dim = cb.dim
mean = [50.0, 1.0, 10.0, 5.0]
sigma = ot.Point(dim, 1.0)
R = ot.IdentityMatrix(dim)

# %%
# We create the input parameters distribution and make a random vector :
distribution = ot.Normal(mean, sigma, R)
X = ot.RandomVector(distribution)
X.setDescription(['E', 'F', 'L', 'I'])

# %%
# `f` is the cantilever beam model :
f = cb.model

# %%
# The random variable of interest Y is then
Y = ot.CompositeRandomVector(f, X)
distribution = ot.ComposedDistribution(coll)
# One orthogonal univariate function family per marginal.
factoryCollection = [
    ot.OrthogonalUniVariateFunctionFamily(
        ot.OrthogonalUniVariatePolynomialFunctionFactory(
            ot.StandardDistributionPolynomialFactory(dist)))
    for dist in coll
]
functionFactory = ot.OrthogonalProductFunctionFactory(factoryCollection)

size = 1000
X = distribution.getSample(size)
Y = model(X)

# ot.ResourceMap.Set('TensorApproximationAlgorithm-Method', 'RankM')

# n-d
nk = [10] * dim
maxRank = 5
algo = ot.TensorApproximationAlgorithm(
    X, Y, distribution, functionFactory, nk, maxRank)
algo.run()
result = algo.getResult()
# print('residuals=', result.getResiduals())
ott.assert_almost_equal(result.getResiduals(), [0.000466643, 0.0])
metamodel = result.getMetaModel()
x = distribution.getMean()
print('x=', ot.Point(x), 'f(x)=', model(x), 'f^(x)=', metamodel(x))
for i in range(model.getOutputDimension()):
    print('rank[', i, ']=', result.getTensor(i).getRank())
#! /usr/bin/env python
import openturns as ot

ot.TESTPREAMBLE()

# A constant random vector always realizes the same 4-dimensional point.
point = ot.Point([101., 102., 103., 104.])
print("point = ", repr(point))

vect = ot.RandomVector(ot.ConstantRandomVector(point))
print("vect=", vect)

# Standard RandomVector API: dimension, realizations, sampling.
print("vect dimension=", vect.getDimension())
print("vect realization (first )=", repr(vect.getRealization()))
print("vect realization (second)=", repr(vect.getRealization()))
print("vect realization (third )=", repr(vect.getRealization()))
print("vect sample =", repr(vect.getSample(5)))
print("mean=", oneSample.computeMean())
print("covariance=", oneSample.computeCovariance())

# Chi-squared goodness-of-fit on growing sample sizes (100, then 1000).
size = 100
for i in range(2):
    accepted = ot.FittingTest.ChiSquared(
        distribution.getSample(size), distribution).getBinaryQualityMeasure()
    msg = "accepted" if accepted else "rejected"
    print("Chi2 test for the generator, sample size=", size, " is", msg)
    size *= 10

# Define a point
point = ot.Point(distribution.getDimension(), 5.0)
print("Point= ", point)

# Show PDF and CDF of point
eps = 1e-5
LPDF = distribution.computeLogPDF(point)
print("log pdf= %.12g" % LPDF)
PDF = distribution.computePDF(point)
print("pdf = %.12g" % PDF)
# Discrete finite difference of the CDF (steps 0 and -1).
print("pdf (FD)= %.12g" % (distribution.computeCDF(point + ot.Point(1, 0)) -
                           distribution.computeCDF(point + ot.Point(1, -1))))
CDF = distribution.computeCDF(point)
print("cdf= %.12g" % CDF)
CCDF = distribution.computeComplementaryCDF(point)
print("ccdf= %.12g" % CCDF)
CF = distribution.computeCharacteristicFunction(point[0])
# Build and draw the failure-domain boundary in the standard space.
failureBoundaryPhysicalSpace = ot.SymbolicFunction(['x'], ['10.0 / x'])
failureBoundaryStandardSpace = ot.ComposedFunction(
    failureBoundaryPhysicalSpace, inverseTransformX1)
x = np.linspace(1.1, 5.0, 100)
cx = np.array([failureBoundaryStandardSpace([xi])[0] for xi in x])

graphStandardSpace = ot.Graph(
    'Failure event in the standard space', r'$u_1$', r'$u_2$', True, '')
# Raw string: '\p' in '\partial' is an invalid escape in a plain literal
# (SyntaxWarning on recent Python); the rendered text is unchanged.
curveCX = ot.Curve(x, cx, r'Boundary of the event $\partial \mathcal{D}$')
curveCX.setLineStyle("solid")
curveCX.setColor("blue")
graphStandardSpace.add(curveCX)

# %%
# We add the origin to the previous graph.
cloud = ot.Cloud(ot.Point([0.0]), ot.Point([0.0]))
cloud.setColor("black")
cloud.setPointStyle("fcircle")
cloud.setLegend("origin")
graphStandardSpace.add(cloud)
graphStandardSpace.setGrid(True)
graphStandardSpace.setLegendPosition("bottomright")

# Some annotation
texts = [r"Event : $\mathcal{D} = \{Y \geq 10.0\}$"]
myText = ot.Text([[3.0, 4.0]], texts)
myText.setTextSize(1)
graphStandardSpace.add(myText)
view = otv.View(graphStandardSpace)

# %%
# Test for sampling
size = 10000
oneSample = distribution.getSample(size)
print("oneSample first=", oneSample[0], " last=", oneSample[size - 1])
print("mean=", oneSample.computeMean())
print("covariance=", oneSample.computeCovariance())

# Kolmogorov goodness-of-fit on growing sample sizes (100, then 1000).
size = 100
for i in range(2):
    print(
        "Kolmogorov test for the generator, sample size=", size, " is ",
        ot.FittingTest.Kolmogorov(distribution.getSample(size),
                                  distribution).getBinaryQualityMeasure())
    size *= 10

# Define a point
point = ot.Point(distribution.getDimension(), 1.0)
print("Point= ", point)

# Show PDF and CDF of point
eps = 1e-5
DDF = distribution.computeDDF(point)
print("ddf =", DDF)
LPDF = distribution.computeLogPDF(point)
print("log pdf= %.12g" % LPDF)
PDF = distribution.computePDF(point)
print("pdf =%.6f" % PDF)
# Central finite difference of the CDF as a cross-check of the PDF.
print("pdf (FD)=%.6f" % ((distribution.computeCDF(point + [eps]) -
                          distribution.computeCDF(point + [-eps])) /
                         (2.0 * eps)))
CDF = distribution.computeCDF(point)
print("cdf= %.12g" % CDF)
#! /usr/bin/env python
import openturns as ot

ot.TESTPREAMBLE()

# First, build two functions from R^3->R
inVar = ['x1', 'x2', 'x3']
functions = []
formula = ['x1^3 * sin(x2 + 2.5 * x3) - (x1 + x2)^2 / (1.0 + x3^2)']
functions.append(ot.SymbolicFunction(inVar, formula))
formula = ['exp(-x1 * x2 + x3) / cos(1.0 + x2 * x3 - x1)']
functions.append(ot.SymbolicFunction(inVar, formula))

# Second, build the weights
coefficients = [0.3, 2.9]

# Third, build the function
myFunction = ot.LinearCombinationFunction(functions, coefficients)
inPoint = ot.Point([1.2, 2.3, 3.4])
print('myFunction=', myFunction)
print('Value at ', inPoint, '=', myFunction(inPoint))
print('Gradient at ', inPoint, '=', myFunction.gradient(inPoint))
print('Hessian at ', inPoint, '=', myFunction.hessian(inPoint))
#! /usr/bin/env python
from __future__ import print_function
import openturns as ot

eps = 0.4

# Instance creation
myFunc = ot.Function(['x1', 'x2'], ['f1', 'f2', 'f3'],
                     ['x1*sin(x2)', 'cos(x1+x2)', '(x2+1)*exp(x1-2*x2)'])

# Expansion center: (1, 2).
center = ot.Point(myFunc.getInputDimension())
for i in range(center.getDimension()):
    center[i] = 1.0 + i

myTaylor = ot.QuadraticTaylor(center, myFunc)
myTaylor.run()
responseSurface = ot.Function(myTaylor.getResponseSurface())
print("myTaylor=", repr(myTaylor))
print("responseSurface=", repr(responseSurface))
# At the center the surrogate must match the function exactly.
print("myFunc(", repr(center), ")=", repr(myFunc(center)))
print("responseSurface(", repr(center), ")=", repr(responseSurface(center)))

# Compare function and surrogate slightly away from the center.
inPoint = ot.Point(center)
inPoint[0] += eps
inPoint[1] -= eps / 2
print("myFunc(", repr(inPoint), ")=", repr(myFunc(inPoint)))
print("responseSurface(", repr(inPoint), ")=", repr(responseSurface(inPoint)))
#! /usr/bin/env python
import openturns as ot
from openturns.testing import assert_almost_equal
from openturns.usecases import branin_function as branin_function
from math import pi

ot.TESTPREAMBLE()
ot.PlatformInfo.SetNumericalPrecision(5)

"""
Test the import of the BraninModel data class.
"""

bm = branin_function.BraninModel()

# test parameters
assert_almost_equal(bm.dim, 2, 1e-12)
assert_almost_equal(bm.trueNoiseFunction, 0.1, 1e-12)
assert_almost_equal(bm.lowerbound, ot.Point([0.0] * bm.dim), 1e-12)
assert_almost_equal(bm.upperbound, ot.Point([1.0] * bm.dim), 1e-12)

# test minima
assert_almost_equal(bm.xexact1, ot.Point([0.123895, 0.818329]), 1e-12)
assert_almost_equal(bm.xexact2, ot.Point([0.542773, 0.151666]), 1e-12)
assert_almost_equal(bm.xexact3, ot.Point([0.961652, 0.165000]), 1e-12)
# Is this distribution continuous ? print("Continuous = ", distribution.isContinuous()) # Test for realization of distribution oneRealization = distribution.getRealization() print("oneRealization=", oneRealization) # Test for sampling size = 10000 oneSample = distribution.getSample(size) print("oneSample first=", oneSample[0], " last=", oneSample[size - 1]) print("mean=", oneSample.computeMean()) print("covariance=", oneSample.computeCovariance()) # Define a point point = ot.Point(distribution.getDimension(), 1.0) print("Point= ", point) # Show PDF and CDF of point eps = 1e-5 DDF = distribution.computeDDF(point) print("ddf =", DDF) print("ddf (FD)= %.6g" % ((distribution.computePDF(point + ot.Point(1, eps)) - distribution.computePDF(point + ot.Point(1, -eps))) / (2.0 * eps))) LPDF = distribution.computeLogPDF(point) print("log pdf= %.6g" % LPDF) PDF = distribution.computePDF(point) print("pdf =%.6g" % PDF) print("pdf (FD)=%.6g" % ((distribution.computeCDF(point + ot.Point(1, eps)) - distribution.computeCDF(point + ot.Point(1, -eps))) /
from __future__ import print_function
import openturns as ot
from openturns.testing import assert_almost_equal
from openturns.usecases import stressed_beam as stressed_beam
from math import pi

ot.TESTPREAMBLE()
ot.PlatformInfo.SetNumericalPrecision(5)

"""
Test the import of the AxialStressedBeam data class.
"""

sb = stressed_beam.AxialStressedBeam()

# test parameters
assert_almost_equal(sb.D, 0.02, 1e-12)
assert_almost_equal(sb.muR, 3.0e6, 1e-12)
assert_almost_equal(sb.sigmaR, 3.0e5, 1e-12)
assert_almost_equal(sb.muF, 750.0, 1e-12)
assert_almost_equal(sb.sigmaF, 50.0, 1e-12)

# test marginals means
assert_almost_equal(sb.distribution_R.getMean()[0], 3.0e6, 1e-12)
assert_almost_equal(sb.distribution_F.getMean()[0], 750.0, 1e-12)

# special value of the model function
X = ot.Point([1.0, pi/10000.0])
assert_almost_equal(sb.model(X), [0.0], 1e-12)
#! /usr/bin/env python from __future__ import print_function import openturns as ot import openturns.testing import os ot.TESTPREAMBLE() try: fileName = 'myStudy.xml' # Create a Study Object by name myStudy = ot.Study(fileName) point = ot.Point(2, 1.0) myStudy.add("point", point) myStudy.save() myStudy2 = ot.Study(fileName) myStudy2.load() point2 = ot.Point() myStudy2.fillObject("point", point2) # cleanup os.remove(fileName) # Create a Study Object with compression myStudy = ot.Study() compressionLevel = 5 myStudy.setStorageManager(ot.XMLStorageManager(fileName, compressionLevel)) point = ot.Point(2, 1.0) myStudy.add("point", point) myStudy.save()
print('Continuous = ', distribution[testCase].isContinuous())

# Test for realization of distribution
oneRealization = distribution[testCase].getRealization()
print('oneRealization=', repr(oneRealization))

# Test for sampling
size = 10000
oneSample = distribution[testCase].getSample(size)
print('oneSample first=', repr(oneSample[0]),
      ' last=', repr(oneSample[size - 1]))
print('mean=', repr(oneSample.computeMean()))
print('covariance=', repr(oneSample.computeCovariance()))

# Define a point
point = ot.Point(distribution[testCase].getDimension(), 1.5)
print('Point= ', repr(point))

# Compare PDF/DDF of the tested distribution against the reference one.
eps = 1e-5
DDF = distribution[testCase].computeDDF(point)
print('ddf =', repr(DDF))
print('ddf (ref)=', repr(referenceDistribution[testCase].computeDDF(point)))
PDF = distribution[testCase].computePDF(point)
print('pdf =%.6f' % PDF)
print('pdf (ref)=%.6f' % referenceDistribution[testCase].computePDF(point))
CDF = distribution[testCase].computeCDF(point)
#! /usr/bin/env python
import openturns as ot

ot.TESTPREAMBLE()

# Instantiate one distribution object
dimension = 3
meanPoint = ot.Point(dimension, 1.0)
meanPoint[0] = 0.5
meanPoint[1] = -0.5
sigma = ot.Point(dimension, 1.0)
sigma[0] = 2.0
sigma[1] = 3.0
R = ot.CorrelationMatrix(dimension)
for i in range(1, dimension):
    R[i, i - 1] = 0.5

# Three Normal components whose means are shifted by (1, 1, 1) each time.
aCollection = ot.DistributionCollection()
aCollection.add(ot.Normal(meanPoint, sigma, R))
meanPoint += ot.Point(meanPoint.getDimension(), 1.0)
aCollection.add(ot.Normal(meanPoint, sigma, R))
meanPoint += ot.Point(meanPoint.getDimension(), 1.0)
aCollection.add(ot.Normal(meanPoint, sigma, R))

# Equal (unnormalized) weights of 2.0 per component.
distribution = ot.Mixture(aCollection, ot.Point(aCollection.getSize(), 2.0))
print("Distribution ", repr(distribution))
print("Weights = ", repr(distribution.getWeights()))
return deviation if __name__=='__main__': import argparse parser = argparse.ArgumentParser(description="Python wrapper example.") parser.add_argument('-X', nargs=4, metavar=('X1', 'X2', 'X3', 'X4'), help='Vector on which the model will be evaluated') parser.add_argument('-N', nargs=1, type=int, help='Number of samples to evaluate') parser.add_argument('-n_cpus', nargs=1, type=int, help='Number of jobs to use') args = parser.parse_args() #model = ot.Function(Wrapper()) if args.n_cpus: n_cpus=args.n_cpus[0] else: n_cpus=-1 model = otw.Parallelizer(Wrapper(), backend='joblib', n_cpus=n_cpus) X_distribution = define_distribution() if args.X: X = ot.Point([float(x) for x in args.X]) elif args.N: X = X_distribution.getSample(args.N[0]) starttime = time() Y = model(X) fulltime = ot.Sample([[time() - starttime]]) fulltime.exportToCSVFile('time.csv') Y.exportToCSVFile('result.csv')
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View

ot.RandomGenerator.SetSeed(0)

# Generate sample with the given plane
size = 20
dim = 2
refSample = ot.Sample(size, dim)
for i in range(size):
    p = ot.Point(dim)
    for j in range(dim):
        p[j] = i + j
    refSample[i] = p
myPlane = ot.BootstrapExperiment(refSample)
sample = myPlane.generate()

# Create an empty graph
graph = ot.Graph("Bootstrap experiment", "x1", "x2", True, "")

# Create the cloud
cloud = ot.Cloud(sample, "blue", "fsquare", "")

# Then, draw it
graph.add(cloud)
fig = plt.figure(figsize=(4, 4))
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
View(graph, figure=fig, axes=[axis], add_legend=False)
str(F.getOutputDimension()))) print((F((10, 5)))) print((F(((10, 5), (6, 7))))) # Instance creation myFunc = ot.Function(F) # Copy constructor newFunc = ot.Function(myFunc) print(('myFunc input dimension= ' + str(myFunc.getInputDimension()))) print(('myFunc output dimension= ' + str(myFunc.getOutputDimension()))) inPt = ot.Point(2, 2.) print((repr(inPt))) outPt = myFunc(inPt) print((repr(outPt))) outPt = myFunc((10., 11.)) print((repr(outPt))) inSample = ot.Sample(10, 2) for i in range(10): inSample[i] = ot.Point((i, i)) print((repr(inSample))) outSample = myFunc(inSample) print((repr(outSample)))
from __future__ import print_function import openturns as ot import openturns.testing import os import sys import math as m ot.TESTPREAMBLE() try: fileName = 'myStudy.xml' # Create a Study Object by name myStudy = ot.Study(fileName) point = ot.Point(2, 1.0) myStudy.add("point", point) myStudy.save() myStudy2 = ot.Study(fileName) myStudy2.load() point2 = ot.Point() myStudy2.fillObject("point", point2) # cleanup os.remove(fileName) # Create a Study Object with compression myStudy = ot.Study() compressionLevel = 5 myStudy.setStorageManager( ot.XMLStorageManager(fileName + ".gz", compressionLevel)) point = ot.Point(2, 1.0)
print("Continuous = ", distribution.isContinuous())

# Test for realization of distribution
oneRealization = distribution.getRealization()
print("oneRealization=", repr(oneRealization))

# Test for sampling
size = 10000
oneSample = distribution.getSample(size)
print("oneSample first=", repr(oneSample[0]),
      " last=", repr(oneSample[size - 1]))
print("mean=", repr(oneSample.computeMean()))
print("covariance=", repr(oneSample.computeCovariance()))

# Define a point
point = ot.Point(distribution.getDimension(), 0.5)
print("Point= ", repr(point))

# Show PDF and CDF of point
eps = 1e-5
# derivative of PDF with respect to its arguments
DDF = distribution.computeDDF(point)
print("ddf =", repr(cleanPoint(DDF)))
# by the finite difference technique
ddfFD = ot.Point(dim)
for i in range(dim):
    # NOTE(review): pointEps aliases point (no copy), so these updates also
    # mutate point; the -= 2*eps presumably prepares the backward evaluation
    # in the continuation of this chunk — verify against the full file.
    pointEps = point
    pointEps[i] += eps
    ddfFD[i] = distribution.computePDF(pointEps)
    pointEps[i] -= 2.0 * eps
# %%
# Definition of the function
# --------------------------

# %%
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt

ot.Log.Show(ot.Log.NONE)

# %%
rosenbrock = ot.SymbolicFunction(['x1', 'x2'], ['(1-x1)^2+100*(x2-x1^2)^2'])

# %%
x0 = ot.Point([-1.0, 1.0])

# %%
xexact = ot.Point([1.0, 1.0])

# %%
lowerbound = [-2.0, -2.0]
upperbound = [2.0, 2.0]

# %%
# Plot the iso-values of the objective function
# ---------------------------------------------

# %%
rosenbrock = ot.MemoizeFunction(rosenbrock)
# %% # We can now build the `FMUFunction`. In the example below, we use the # initialization script to fix the values of ``L`` and ``F`` in the FMU whereas # ``E`` and `Ì` are the function variables. import openturns as ot from os.path import abspath function = otfmi.FMUFunction( path_fmu, inputs_fmu=["E", "I"], outputs_fmu=["y"], initialization_script=abspath("initialization.mos")) inputPoint = ot.Point([2e9, 7e7]) outputPoint = function(inputPoint) print(outputPoint) # %% # .. note:: # It is possible to set the value of a model input in the # initialization script *and* use it as a function input variable. In this # case, the initial value from the initialization script is overriden. # %% # For instance, we consider the 4 model parameters as variables. Note the # result is different from above, as the input point overrides the values from # the initialization script. smallExampleFunction = otfmi.FMUFunction(