def __init__(self, simulateur, annee, S, D):
    """
    Create a probabilistic pension model.

    Parameters:
        simulateur : a SimulateurRetraite
        annee : a float, the year at which P is computed
        S : a float, the financial balance as a share of GDP
        D : a positive float, the amount of pension expenditure as a share of GDP

    Description:
        Creates a probabilistic pension model for the ratio
        (average pension) / (average salary).
        The model inputs are "As", "F", "TauC" and the output is "P".

        The parameters S and D are fixed by the class constructor
        when the object is created.

        * S : the financial balance of the pension system (% of GDP)
        * D : the amount of expenditure (% of GDP)
        * As : the average retirement age chosen by the user
        * F : elasticity factor for postponing the retirement age (e.g. F=0.5)
        * TauC : the unemployment rate (e.g. TauC = 4.5)

        The variable distributions are ot.Uniform and independent.

    Example:
        S = 0.0
        D = 0.14
        annee = 2050
        modele = ModelePensionProbabiliste(simulateur, annee, S, D)
        fonction = modele.getFonction()
        inputDistribution = modele.getInputDistribution()
    """
    # Build the full pension model: inputs = (S, D, As, F, TauC)
    modelePension = ot.Function(FonctionPension(simulateur, annee))
    # Build the reduced model from the full one: inputs = (As, F, TauC)
    indices = ot.Indices([0, 1])
    referencePoint = ot.Point([S, D])
    self.fonction = ot.ParametricFunction(modelePension, indices, referencePoint)
    # Distribution of the inputs
    As = ot.Uniform(62.0, 66.0)
    F = ot.Uniform(0.25, 0.75)
    TauC = ot.Uniform(4.5, 10.0)
    self.inputDistribution = ot.ComposedDistribution([As, F, TauC])
    self.inputDistribution.setDescription(["As", "F", "TauC"])
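# Hedged sketch: the docstring example above calls getFonction() and
# getInputDistribution(), which are not shown in this fragment. Minimal
# accessors (an assumption, not the original implementation) would simply
# return the attributes built by __init__.
def getFonction(self):
    """Return the parametric pension function with inputs (As, F, TauC)."""
    return self.fonction

def getInputDistribution(self):
    """Return the joint distribution of (As, F, TauC)."""
    return self.inputDistribution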
"""
Aggregate processes
===================
"""
# %%
# In this example we are going to concatenate several processes that share the
# same mesh.

# %%
from __future__ import print_function
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt
ot.Log.Show(ot.Log.NONE)

# %%
# Create the processes to aggregate
myMesher = ot.IntervalMesher(ot.Indices([100, 10]))
lowerBound = [0.0, 0.0]
upperBound = [2.0, 4.0]
myInterval = ot.Interval(lowerBound, upperBound)
myMesh = myMesher.build(myInterval)
myProcess1 = ot.WhiteNoise(ot.Normal(), myMesh)
myProcess2 = ot.WhiteNoise(ot.Triangular(), myMesh)

# %%
# Draw values of a realization of the first process
marginal = ot.HistogramFactory().build(myProcess1.getRealization().getValues())
graph = marginal.drawPDF()
view = viewer.View(graph)

# %%
# Create an aggregated process
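# Hedged completion: the fragment stops at the comment above; with the
# OpenTURNS API the natural next step is ot.AggregatedProcess, which bundles
# processes sharing the same mesh into one multivariate process.
myAggregatedProcess = ot.AggregatedProcess([myProcess1, myProcess2])
print(myAggregatedProcess)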
gibbs = ot.Gibbs([rwmh_beta, rwmh_alpha])
sample = gibbs.getSample(1000)
print('mu=', sample.computeMean())
print('sigma=', sample.computeStandardDeviation())

# check recompute indices, update bug
initial_state = [0.0, 0.0, 20.0]
target = ot.Normal(3)
weird_target = ot.ComposedDistribution(
    [ot.Normal(), ot.Normal(), ot.Dirac(20.0)])
normal0_rwmh = ot.RandomWalkMetropolisHastings(
    target, initial_state, ot.Uniform(-10, 10), [0])  # samples from Normal(0,1)
normal1_rwmh = ot.RandomWalkMetropolisHastings(
    target, initial_state, ot.Uniform(-10, 10), [1])  # samples from Normal(0,1)
dirac_rwmh = ot.RandomWalkMetropolisHastings(
    weird_target, initial_state, ot.Normal(), [2])  # samples from Dirac(20)

# samples from Normal(0,1) x Normal(0,1) x Dirac(20)
gibbs = ot.Gibbs([normal0_rwmh, normal1_rwmh, dirac_rwmh])
sample = gibbs.getSample(1000)
recompute = gibbs.getRecomputeLogPosterior()
print(recompute)
assert recompute == ot.Indices([1, 0, 1]), "wrong recompute indices"
mean = sample.computeMean()
stddev = sample.computeStandardDeviation()
print(mean, stddev)
ott.assert_almost_equal(mean, [-0.015835, 0.169951, 20])
ott.assert_almost_equal(stddev, [0.956516, 1.05469, 0])
resCDF = distribution.computeSequentialConditionalCDF(pt)
print("sequential conditional CDF(", pt, ")=", resCDF)
print("sequential conditional quantile(", resCDF, ")=",
      distribution.computeSequentialConditionalQuantile(resCDF))

# Extract the marginals
for i in range(dim):
    margin = distribution.getMarginal(i)
    print("margin=", repr(margin))
    print("margin PDF=%.6f" % margin.computePDF([0.5]))
    print("margin CDF=%.6f" % margin.computeCDF([0.5]))
    print("margin quantile=", repr(margin.computeQuantile(0.95)))
    print("margin realization=", repr(margin.getRealization()))

if dim >= 2:
    # Extract a 2-D marginal
    indices = ot.Indices(2, 0)
    indices[0] = 1
    indices[1] = 0
    print("indices=", repr(indices))
    margins = distribution.getMarginal(indices)
    print("margins=", repr(margins))
    print("margins PDF=%.6f" % margins.computePDF([0.5] * 2))
    print("margins CDF=%.6f" % margins.computeCDF([0.5] * 2))
    quantile = margins.computeQuantile(0.95)
    print("margins quantile=", repr(quantile))
    print("margins CDF(quantile)=%.6f" % margins.computeCDF(quantile))
    print("margins realization=", repr(margins.getRealization()))

chol = distribution.getCholesky()
invChol = distribution.getInverseCholesky()
print("chol=", repr(chol.clean(1e-6)))
    ot.LinearModelTest.LinearModelResidualMean(sampleY, sampleZ).getPValue())

# Durbin Watson
ot.RandomGenerator.SetSeed(5415)
eps = ot.Normal(0, 20)
f = ot.SymbolicFunction('x', '5+2*x+x^2-0.1*x^3')
N = 15
x = ot.Sample([[0], [1.42857], [2.85714], [4.28571], [5.71429],
               [7.14286], [8.57143], [10], [11.4286], [12.8571],
               [14.2857], [15.7143], [17.1429], [18.5714], [20]])
y = f(x) + eps.getSample(N)
linmodel = ot.LinearModelAlgorithm(x, y).getResult().getCoefficients()
dwTest = ot.LinearModelTest.LinearModelDurbinWatson(x, y)
print('Durbin Watson = ', dwTest)

selection = ot.Indices(5)
selection.fill()

selection2 = ot.Indices(1, 0)
sampleX0 = sampleX.getMarginal(0)

# Regression test between 2 samples: firstSample of dimension n and
# secondSample of dimension 1. If firstSample[i] is the numerical sample
# extracted from firstSample (ith coordinate of each point of the
# numerical sample), PartialRegression performs the Regression test
# simultaneously on all firstSample[i] and secondSample, for i in the
# selection. The Regression test tests if the regression model between two
# scalar numerical samples is significant. It is based on the deviation
# analysis of the regression. The t-test is used.

# The two tests must be equal
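# Hedged sketch of the comparison announced just above ("The two tests must
# be equal"): PartialRegression on the selected coordinate and FullRegression
# on the corresponding 1-D marginal should agree. The results are printed
# rather than asserted, since the exact return type varies across OpenTURNS
# versions.
print('PartialRegression=',
      ot.LinearModelTest.PartialRegression(sampleX, sampleY, selection2))
print('FullRegression   =',
      ot.LinearModelTest.FullRegression(sampleX0, sampleY))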
    ot.Normal(0.1, 0.0161812),
    ot.LogNormal(7.71, 1.0056),
    ot.Uniform(63070.0, 115600.0),
    ot.Uniform(990.0, 1110.0),
    ot.Uniform(63.1, 116.0),
    ot.Uniform(700.0, 820.0),
    ot.Uniform(1120.0, 1680.0),
    ot.Uniform(9855.0, 12045.0)
]
distribution = ot.ComposedDistribution(coll)
distribution.setDescription(input_names)

# %%
# Freeze r, Tu, Tl from the model to go faster.
# Note: the parametric function must be built from the *full* distribution,
# before the distribution is reduced to the remaining marginals.
selection = [1, 2, 4]
complement = ot.Indices(selection).complement(dimension)
model = ot.ParametricFunction(
    model, selection, distribution.getMarginal(selection).getMean())
distribution = distribution.getMarginal(complement)
input_names_copy = list(input_names)
input_names = itemgetter(*complement)(input_names)
dimension = len(complement)

# %%
# Design of experiments
size = 1000
X = distribution.getSample(size)
Y = model(X)

# %%
# Create a functional chaos model
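# Hedged sketch of the step announced just above: with the design (X, Y) and
# the reduced distribution, a functional chaos metamodel can be fitted as
algo = ot.FunctionalChaosAlgorithm(X, Y, distribution)
algo.run()
metamodel = algo.getResult().getMetaModel()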
#!/usr/bin/env python

from __future__ import print_function
import openturns as ot
import otmorris

# Define model
ot.RandomGenerator.SetSeed(0)
# Number of trajectories
r = 5

# Define experiments in [0,1]^2
print("Use Case #1 : generate trajectories from regular grid")
levels = ot.Indices(2)
levels.fill(5, 0)
morris_experiment = otmorris.MorrisExperimentGrid(levels, r)
grid_bound = morris_experiment.getBounds()
sample1 = morris_experiment.generate()
print("Morris experiment generated from grid = ", sample1)

print("Use Case #2 : generate trajectories from initial lhs design")
size = 20
# Generate an LHS design
dist = ot.ComposedDistribution(2 * [ot.Uniform(0, 1)])
experiment = ot.LHSExperiment(dist, size, True, False)
lhsDesign = experiment.generate()
print("Initial LHS design = ", lhsDesign)
# Generate designs (note: use the LHS experiment here, not the grid one)
morris_experiment_lhs = otmorris.MorrisExperimentLHS(lhsDesign, r)
lhs_bound = morris_experiment_lhs.getBounds()
sample2 = morris_experiment_lhs.generate()
print("Morris experiment generated from LHS = ", sample2)
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View

ot.RandomGenerator.SetSeed(0)

# Generate sample with the given plane
distribution = ot.ComposedDistribution(
    ot.DistributionCollection(
        [ot.Exponential(), ot.Triangular(-1.0, -0.5, 1.0)]))
marginalDegrees = ot.Indices([3, 6])
myPlane = ot.GaussProductExperiment(ot.Distribution(distribution),
                                    marginalDegrees)
sample = myPlane.generate()

# Create an empty graph
graph = ot.Graph("", "x1", "x2", True, "")

# Create the cloud
cloud = ot.Cloud(sample, "blue", "fsquare", "")

# Then, draw it
graph.add(cloud)
fig = plt.figure(figsize=(4, 4))
plt.suptitle("Gauss product experiment")
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
View(graph, figure=fig, axes=[axis], add_legend=False)
# Check 2-d array whose nested dim is size=1 / Function interoperability
def aFunc2(x):
    return [2.0 * x[0]]


PYNMF = ot.PythonFunction(1, 1, aFunc2)
a0 = np.array(([1.]))
print("Point", PYNMF(a0), "= PYNMF( array", a0, ")")
a1 = np.array(([1.], [2.], [3.]))
print("Sample", PYNMF(a1), "= PYNMF( array", a1, ")")

# Check tuple / Indices conversion
t0 = (1, 2)
i0 = ot.Indices(t0)
print("tuple", t0, "=> Indices", i0)
t1 = tuple(i0)
print("Indices", i0, "=> tuple", tuple([int(x) for x in t1]))

# Check list / Indices conversion
l0 = [3, 4, 5]
i0 = ot.Indices(l0)
print("list", l0, "=> Indices", i0)
l1 = list(i0)
print("Indices", i0, "=> list", [int(x) for x in l1])

# Check Indices typemap
sample = ot.Normal(3).getSample(10)
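# Hedged continuation: Sample.getMarginal accepts any Python integer sequence,
# which the typemap being checked here converts to ot.Indices on the fly.
marginal = sample.getMarginal([0, 2])
print("Sample.getMarginal([0, 2]) dimension=", marginal.getDimension())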
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View

ot.RandomGenerator.SetSeed(0)

# Generate sample with the given plane
distribution = ot.ComposedDistribution(
    [ot.Exponential(), ot.Triangular(-1.0, -0.5, 1.0)])
marginalSizes = ot.Indices([3, 6])
experiment = ot.GaussProductExperiment(
    ot.Distribution(distribution), marginalSizes)
sample = experiment.generate()

# Create an empty graph
graph = ot.Graph("Gauss product experiment", "x1", "x2", True, "")

# Create the cloud
cloud = ot.Cloud(sample, "blue", "fsquare", "")

# Then, draw it
graph.add(cloud)
fig = plt.figure(figsize=(4, 4))
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
View(graph, figure=fig, axes=[axis], add_legend=False)
    marginRef = copulaRef.getMarginal(i)
    print("margin=", repr(margin))
    print("margin PDF =%.6f" % margin.computePDF(ot.Point(1, 0.25)))
    print("margin PDF (ref)=%.6f" % marginRef.computePDF(ot.Point(1, 0.25)))
    print("margin CDF =%.6f" % margin.computeCDF(ot.Point(1, 0.25)))
    print("margin CDF (ref)=%.6f" % marginRef.computeCDF(ot.Point(1, 0.25)))
    print("margin quantile =", repr(margin.computeQuantile(0.95)))
    print("margin quantile (ref)=", repr(marginRef.computeQuantile(0.95)))
    print("margin realization=", repr(margin.getRealization()))

# Extract a 2-D marginal
indices = ot.Indices([1, 0])
print("indices=", repr(indices))
margin = copula.getMarginal(indices)
marginRef = copulaRef.getMarginal(indices)
print("margin=", repr(margin))
print("margin PDF =%.6f" % margin.computePDF(ot.Point(2, 0.25)))
print("margin PDF (ref)=%.6f" % marginRef.computePDF(ot.Point(2, 0.25)))
print("margin CDF =%.6f" % margin.computeCDF(ot.Point(2, 0.25)))
print("margin CDF (ref)=%.6f" % marginRef.computeCDF(ot.Point(2, 0.25)))
print("margin quantile =", repr(margin.computeQuantile(0.95)))
print("margin quantile (ref)=", repr(marginRef.computeQuantile(0.95)))
print("margin realization=", repr(margin.getRealization()))

# tbb nested parallelism issue
def _compute_coefficients_legendre(self, sample_paths,
                                   legendre_quadrature_order=None):
    dimension = self._lower_bound.size
    truncation_order = self._truncation_order
    if legendre_quadrature_order is None:
        legendre_quadrature_order = self._legendre_quadrature_order
    elif type(legendre_quadrature_order) is not int \
            or legendre_quadrature_order <= 0:
        raise ValueError('legendre_quadrature_order must be a positive '
                         'integer.')
    n_sample_paths = len(sample_paths)

    # Gauss-Legendre quadrature nodes and weights
    polyColl = ot.PolynomialFamilyCollection(
        [ot.LegendreFactory()] * dimension)
    polynoms = ot.OrthogonalProductPolynomialFactory(polyColl)
    U, W = polynoms.getNodesAndWeights(
        ot.Indices([legendre_quadrature_order] * dimension))
    W = np.ravel(W)
    U = np.array(U)
    scale = (self._upper_bound - self._lower_bound) / 2.
    shift = (self._upper_bound + self._lower_bound) / 2.
    X = scale * U + shift

    # Compute coefficients
    try:
        available_memory = int(.9 * get_available_memory())
    except Exception:
        if self.verbose:
            print('WRN: Available memory estimation failed! '
                  'Assuming 1Gb is available (first guess).')
        available_memory = 1024 ** 3
    max_size = int(available_memory / 8 / truncation_order / n_sample_paths)
    batch_size = min(W.size, max_size)
    if self.verbose and batch_size < W.size:
        print('RAM: %d Mb available' % (available_memory / 1024 ** 2))
        print('RAM: %d allocable terms / %d total terms' % (max_size, W.size))
        print('RAM: %d loops required' % np.ceil(float(W.size) / max_size))
    while True:
        coefficients = np.zeros((n_sample_paths, truncation_order))
        try:
            n_done = 0
            while n_done < W.size:
                sample_paths_values = np.vstack([
                    np.ravel(sample_paths[i](X[n_done:(n_done + batch_size)]))
                    for i in range(n_sample_paths)])
                mean_values = np.ravel(self._mean(
                    X[n_done:(n_done + batch_size)]))[np.newaxis, :]
                centered_sample_paths_values = \
                    sample_paths_values - mean_values
                del sample_paths_values, mean_values
                eigenelements_values = np.vstack([
                    self._eigenfunctions[k](X[n_done:(n_done + batch_size)])
                    / np.sqrt(self._eigenvalues[k])
                    for k in range(truncation_order)])
                coefficients += np.sum(
                    W[np.newaxis, np.newaxis, n_done:(n_done + batch_size)]
                    * centered_sample_paths_values[:, np.newaxis, :]
                    * eigenelements_values[np.newaxis, :, :], axis=-1)
                del centered_sample_paths_values, eigenelements_values
                n_done += batch_size
            break
        except MemoryError:
            # Halve the batch size and retry; integer division keeps the
            # value usable in slice expressions (a float would raise).
            batch_size //= 2
    coefficients *= np.prod(self._upper_bound - self._lower_bound)
    return coefficients
def _legendre_galerkin_scheme(self, legendre_galerkin_order=10,
                              legendre_quadrature_order=None):
    # Input checks
    if legendre_galerkin_order <= 0:
        raise ValueError('legendre_galerkin_order must be a positive '
                         'integer!')
    if legendre_quadrature_order is not None:
        if legendre_quadrature_order <= 0:
            raise ValueError('legendre_quadrature_order must be a '
                             'positive integer!')

    # Settings
    dimension = self._lower_bound.size
    truncation_order = self._truncation_order
    galerkin_size = ot.EnumerateFunction(
        dimension).getStrataCumulatedCardinal(legendre_galerkin_order)
    if legendre_quadrature_order is None:
        legendre_quadrature_order = 2 * legendre_galerkin_order + 1

    # Check if the current settings are compatible
    if truncation_order > galerkin_size:
        raise ValueError(
            'The truncation order must be less than or equal to the size '
            'of the functional basis in the chosen Legendre Galerkin '
            'scheme. The current size of the Galerkin basis only allows '
            'to get %d terms in the KL expansion.' % galerkin_size)

    # Construction of the Galerkin basis: tensorized Legendre polynomials
    tensorized_legendre_polynomial_factory = \
        ot.PolynomialFamilyCollection([ot.LegendreFactory()] * dimension)
    tensorized_legendre_polynomial_factory = \
        ot.OrthogonalProductPolynomialFactory(
            tensorized_legendre_polynomial_factory)
    tensorized_legendre_polynomials = \
        [tensorized_legendre_polynomial_factory.build(i)
         for i in range(galerkin_size)]

    # Compute matrix C coefficients using Gauss-Legendre quadrature
    polyColl = ot.PolynomialFamilyCollection(
        [ot.LegendreFactory()] * dimension * 2)
    polynoms = ot.OrthogonalProductPolynomialFactory(polyColl)
    U, W = polynoms.getNodesAndWeights(
        ot.Indices([legendre_quadrature_order] * dimension * 2))
    W = np.ravel(W)
    scale = (self._upper_bound - self._lower_bound) / 2.
    shift = (self._upper_bound + self._lower_bound) / 2.
    U = np.array(U)
    X = np.repeat(scale, 2) * U + np.repeat(shift, 2)
    if self.verbose:
        print('Computing matrix C...')
    try:
        available_memory = int(.9 * get_available_memory())
    except Exception:
        if self.verbose:
            print('WRN: Available memory estimation failed! '
                  'Assuming 1Gb is available (first guess).')
        available_memory = 1024 ** 3
    max_size = int(available_memory / 8 / galerkin_size ** 2)
    batch_size = min(W.size, max_size)
    if self.verbose and batch_size < W.size:
        print('RAM: %d Mb available' % (available_memory / 1024 ** 2))
        print('RAM: %d allocable terms / %d total terms' % (max_size, W.size))
        print('RAM: %d loops required' % np.ceil(float(W.size) / max_size))
    while True:
        C = np.zeros((galerkin_size, galerkin_size))
        try:
            n_done = 0
            while n_done < W.size:
                covariance_at_X = self._covariance(
                    X[n_done:(n_done + batch_size)])
                H1 = np.vstack([
                    np.ravel(tensorized_legendre_polynomials[i](
                        U[n_done:(n_done + batch_size), :dimension]))
                    for i in range(galerkin_size)])
                H2 = np.vstack([
                    np.ravel(tensorized_legendre_polynomials[i](
                        U[n_done:(n_done + batch_size), dimension:]))
                    for i in range(galerkin_size)])
                C += np.sum(
                    W[np.newaxis, np.newaxis, n_done:(n_done + batch_size)]
                    * covariance_at_X[np.newaxis, np.newaxis, :]
                    * H1[np.newaxis, :, :] * H2[:, np.newaxis, :], axis=-1)
                del covariance_at_X, H1, H2
                n_done += batch_size
            break
        except MemoryError:
            # Halve the batch size and retry (integer division, see above)
            batch_size //= 2
    C *= np.prod(self._upper_bound - self._lower_bound) ** 2.
    # Matrix B is orthonormal up to some constant
    B = np.diag(np.repeat(
        np.prod(self._upper_bound - self._lower_bound), galerkin_size))

    # Solve the generalized eigenvalue problem C D = L B D in L, D
    if self.verbose:
        print('Solving generalized eigenvalue problem...')
    eigenvalues, eigenvectors = linalg.eigh(C, b=B, lower=True)
    eigenvalues, eigenvectors = eigenvalues.real, eigenvectors.real

    # Sort the eigensolutions in the descending order of eigenvalues
    order = eigenvalues.argsort()[::-1]
    eigenvalues = eigenvalues[order]
    eigenvectors = eigenvectors[:, order]

    # Truncate the expansion
    eigenvalues = eigenvalues[:truncation_order]
    eigenvectors = eigenvectors[:, :truncation_order]

    # Eliminate insignificant negative eigenvalues: raise only if the most
    # negative eigenvalue is significant relative to the largest one
    if eigenvalues.min() <= 0.:
        if eigenvalues.min() < -.01 * eigenvalues.max():
            raise Exception(
                'The smallest significant eigenvalue seems to be '
                'negative... Check the positive definiteness of the '
                'covariance function.')
        else:
            truncation_order = np.nonzero(eigenvalues <= 0)[0][0]
            eigenvalues = eigenvalues[:truncation_order]
            eigenvectors = eigenvectors[:, :truncation_order]
            self._truncation_order = truncation_order
            print('WRN: truncation_order was too large.')
            print('It has been reset to: %d' % truncation_order)

    # Define eigenfunctions
    class LegendrePolynomialsBasedEigenFunction():
        def __init__(self, vector):
            self._vector = vector

        def __call__(self, x):
            x = np.asanyarray(x)
            if x.ndim <= 1:
                x = np.atleast_2d(x).T
            u = (x - shift) / scale
            return np.sum([
                np.ravel(tensorized_legendre_polynomials[i](u))
                * self._vector[i] for i in range(truncation_order)], axis=0)

    # Set attributes
    self._eigenvalues = eigenvalues
    self._eigenfunctions = [LegendrePolynomialsBasedEigenFunction(vector)
                            for vector in eigenvectors.T]
    self._legendre_galerkin_order = legendre_galerkin_order
    self._legendre_quadrature_order = legendre_quadrature_order
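# Standalone hedged sketch of the quadrature rule both methods above rely on:
# tensorized Gauss-Legendre nodes and weights on [-1, 1]^d obtained from the
# same OpenTURNS calls (here d = 2 with 3 points per dimension, hence 9 nodes;
# values are illustrative only).
import openturns as ot
polys = ot.OrthogonalProductPolynomialFactory(
    ot.PolynomialFamilyCollection([ot.LegendreFactory()] * 2))
nodes, weights = polys.getNodesAndWeights(ot.Indices([3, 3]))
print(nodes.getSize(), 'nodes,', len(weights), 'weights')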
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

try:
    mesh = ot.IntervalMesher(ot.Indices(1, 9)).build(ot.Interval(-1.0, 1.0))
    factory = ot.KarhunenLoeveP1Factory(mesh, 0.0)
    eigenValues = ot.NumericalPoint()
    KLModes = factory.buildAsProcessSample(ot.AbsoluteExponential([1.0]),
                                           eigenValues)
    print("KL modes=", KLModes)
    print("KL eigenvalues=", eigenValues)
    cov1D = ot.AbsoluteExponential([1.0])
    KLFunctions = factory.build(cov1D, eigenValues)
    print("KL functions=", KLFunctions)
    print("KL eigenvalues=", eigenValues)
    R = ot.CorrelationMatrix(2, [1.0, 0.5, 0.5, 1.0])
    scale = [1.0]
    amplitude = [1.0, 2.0]
    cov2D = ot.ExponentialModel(scale, amplitude, R)
    KLFunctions = factory.build(cov2D, eigenValues)
    print("KL functions=", KLFunctions)
    print("KL eigenvalues=", eigenValues)
except:
    import sys
    print("t_KarhunenLoeveP1Factory_std.py", sys.exc_info()[0],
          sys.exc_info()[1])
#! /usr/bin/env python

import openturns as ot

ot.TESTPREAMBLE()

size = 4
indices = ot.Indices(size, 0)
indices.fill()
print("indices=", repr(indices))
print("are indices valid with bound=", size, "? ", indices.check(size))
print("are indices valid with bound=", size // 2, "? ",
      indices.check(size // 2))
indices[0] = indices[size - 1]
print("indices after transformation=", repr(indices))
print("are indices valid with bound=", size, "? ", indices.check(size))
print("complement wrt 7=", indices.complement(7))
print("empty complement=", ot.Indices().complement(7))

# slicing
indices2 = ot.Indices((1, 2, 3, 4, 5))
print('indices2[2] = ', indices2[2])
print('indices2[1:3] = ', indices2[1:3])
print('indices2[:] = ', indices2[:])
indices2[1:3] = (88, 99)
print('indices2 = ', indices2)

index = indices2.find(88)
assert index < indices2.getSize(), "wrong index"
assert indices2[index] == 88, "wrong index"
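# Hedged extra check: Indices also exposes monotonicity helpers such as
# isIncreasing(), useful when indices are meant to be ordered.
print('increasing? ', ot.Indices([1, 2, 3]).isIncreasing())
print('increasing? ', indices2.isIncreasing())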
# simulate the true physical model
basis = ot.ConstantBasisFactory(4).build()
covarianceModel = ot.SquaredExponential([5.03148, 13.9442, 20, 20], [15.1697])
krigingModel = ot.KrigingAlgorithm(inputSample, signals, covarianceModel,
                                   basis)
ot.RandomGenerator.SetSeed(0)
np.random.seed(0)
krigingModel.run()
physicalModel = krigingModel.getResult().getMetaModel()


####### Test on the POD models ###################

# Test hitmiss without Box Cox with rf classifier
np.random.seed(0)
ot.RandomGenerator.SetSeed(0)
ot.RandomGenerator.SetState(ot.RandomGeneratorState(ot.Indices([0] * 768), 0))
POD1 = otpod.AdaptiveHitMissPOD(inputDOE, outputDOE, physicalModel, 20,
                                detection)
POD1.run()
detectionSize1 = POD1.computeDetectionSize(0.9, 0.95)


def test_1_a90():
    np.testing.assert_almost_equal(detectionSize1[0], 4.71811745363573,
                                   decimal=5)


def test_1_a95():
    np.testing.assert_almost_equal(detectionSize1[1], 5.35497504836619,
                                   decimal=5)
#!/usr/bin/env python
# coding: utf-8

from __future__ import print_function
import openturns as ot
import persalys

# mesh model
meshModel0 = persalys.GridMeshModel()
print(meshModel0)

# mesh model
meshModel1 = persalys.GridMeshModel([persalys.Variable('x', 'aDescription')],
                                    ot.Interval(0., 12.), ot.Indices(1, 20))
print(meshModel1)

filename = 'mesh.csv'
mesh = ot.IntervalMesher(ot.Indices(1, 20)).build(ot.Interval(0., 12.))
mesh.getVertices().exportToCSVFile(filename)

# mesh model
meshModel2 = persalys.ImportedMeshModel()
print(meshModel2)

# mesh model
meshModel3 = persalys.ImportedMeshModel(
    [persalys.Variable('z', 'aDescription')], 'mesh.csv', ot.Indices(1, 0))
# Then g acts on processes X: Omega * R^nSpat --> R^2

#
# Create a trend function fTrend: R^n --> R^q
# for example for myXtProcess of dimension 2
# defined on a bidimensional mesh
# fTrend : R^2 --> R^2
# (t1, t2) --> (1+2t1, 1+3t2)
fTrend = ot.SymbolicFunction(['t1', 't2'], ['1+2*t1', '1+3*t2'])

#
# Create a Gaussian process of dimension 2
# whose mesh is a box of dimension 2
myIndices = ot.Indices([80, 40])
myMesher = ot.IntervalMesher(myIndices)
lowerBound = [0., 0.]
upperBound = [2., 1.]
myInterval = ot.Interval(lowerBound, upperBound)
myMesh = myMesher.build(myInterval)

# Define a bidimensional temporal Gaussian process on the mesh
# with independent components
amplitude = [1.0, 1.0]
scale = [0.2, 0.3]
myCovModel = ot.ExponentialModel(scale, amplitude)
myXtProcess_temp = ot.GaussianProcess(myCovModel, myMesh)

# Non linear transformation of myXtProcess_temp (a hedged sketch follows)
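# Hedged sketch of the transformation announced above; the map g below is an
# illustrative assumption, applied vertex-wise through a value function and
# composed with the temporal process, as done elsewhere in these examples.
g = ot.SymbolicFunction(['x1', 'x2'], ['exp(x1)', 'x1 * x2'])
myDynTransform = ot.ValueFunction(g, myMesh)
myXtProcess = ot.CompositeProcess(myDynTransform, myXtProcess_temp)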
E = ot.ParametrizedDistribution(ot.LogNormalMuSigmaOverMu(3e4, 0.12))
F = ot.ParametrizedDistribution(ot.LogNormalMuSigmaOverMu(0.1, 0.20))
list_marginals = [L, b, h, E, F]
distribution = ot.ComposedDistribution(list_marginals)
distribution.setDescription(('L', 'b', 'h', 'E', 'F'))
dim = distribution.getDimension()

level_number = 4
trajectories = 10
jump_step = int(level_number / 2)
levels = [level_number] * dim

# set the bounds of the grid experiment
bound = ot.Interval(
    [marginal.computeQuantile(0.01)[0] for marginal in list_marginals],
    [marginal.computeQuantile(0.99)[0] for marginal in list_marginals])
experiment = otmorris.MorrisExperimentGrid(levels, bound, trajectories)
experiment.setJumpStep(ot.Indices([jump_step] * dim))

# create and compute the design of experiments
input_sample = experiment.generate()
print("Morris experiment generated from grid = ")
print(input_sample)
output_sample = poutre(input_sample)

# run the Morris analysis
morris = otmorris.Morris(input_sample, output_sample, bound)
print("E(|EE|) = ", morris.getMeanAbsoluteElementaryEffects())
print("E(EE) = ", morris.getMeanElementaryEffects())
print("V(|EE|)^{1/2} = ", morris.getStandardDeviationElementaryEffects())
#
# thanks to the method *getInverse()*, which produces an object of type
# *InverseBoxCoxTransform* that can be evaluated on a field.
# The new field shares the same mesh as the initial field.
#

# %%
from __future__ import print_function
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt
ot.Log.Show(ot.Log.NONE)

# %%
# Define a process
myIndices = ot.Indices([10, 5])
myMesher = ot.IntervalMesher(myIndices)
myInterval = ot.Interval([0.0, 0.0], [2.0, 1.0])
myMesh = myMesher.build(myInterval)

amplitude = [1.0]
scale = [0.2, 0.2]
myCovModel = ot.ExponentialModel(scale, amplitude)
myXproc = ot.GaussianProcess(myCovModel, myMesh)

g = ot.SymbolicFunction(['x1'], ['exp(x1)'])
myDynTransform = ot.ValueFunction(g, myMesh)
myXtProcess = ot.CompositeProcess(myDynTransform, myXproc)

# %%
# Draw a field
field = myXtProcess.getRealization()
graph = field.drawMarginal(0)
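# Hedged completion: render the field graph with the viewer imported above.
view = viewer.View(graph)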
#! /usr/bin/env python

import openturns as ot
import pickle
from io import BytesIO

obj_list = []
obj_list.append(ot.Point([1.6, -8.7]))
obj_list.append(ot.Sample([[4.6, -3.7], [8.4, 6.3]]))
obj_list.append(ot.Description(['x', 'y', 'z']))
obj_list.append(ot.Indices([1, 2, 4]))
obj_list.append(ot.Matrix([[1, 2], [3, 4]]))
obj_list.append(ot.SymbolicFunction(['x1', 'x2'], ['y1=x1+x2']))

src = BytesIO()

for obj in obj_list:
    pickle.dump(obj, src)

src.seek(0)

for obj in obj_list:
    obj2 = pickle.load(src)
    print(('object: ' + str(obj)))
    print(('same: ' + str(obj2 == obj) + '\n'))
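# Hedged variant: the same round trip through an on-disk file instead of the
# in-memory buffer, using only the standard pickle API.
with open('objects.pkl', 'wb') as f:
    for obj in obj_list:
        pickle.dump(obj, f)
with open('objects.pkl', 'rb') as f:
    for obj in obj_list:
        print('same (file):', pickle.load(f) == obj)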
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

# Default constructor
flat1 = ot.IndicesCollection()
print('flat1=', flat1)

# Constructor with Collection<Indices>
coll2 = []
ind1 = ot.Indices(1)
ind4 = ot.Indices(4)
ind4.fill(8)
ind3 = ot.Indices(3)
ind3.fill(3)
coll2.append(ind1)
coll2.append(ind4)
coll2.append(ind3)
flat2 = ot.IndicesCollection(coll2)
print('flat2=', flat2)

# Copy constructor
flat3 = ot.IndicesCollection(flat2)
print('flat3=', flat3)

# Strided
coll4 = []
coll4.append(ind4)
coll4.append(ind4)
coll4.append(ind4)
# Get 95% quantile
quantile = distribution.computeQuantile(0.95)
print("Quantile=", repr(quantile))
print("entropy=%.6f" % distribution.computeEntropy())
for i in range(6):
    print("standard moment n=", i, " value=",
          distribution.getStandardMoment(i))
print("Standard representative=", distribution.getStandardRepresentative())

print("parameter=", distribution.getParameter())
print("parameterDescription=", distribution.getParameterDescription())
parameter = distribution.getParameter()
parameter[-1] = 0.3
distribution.setParameter(parameter)
print("parameter=", distribution.getParameter())

# To prevent automatic compaction
ot.ResourceMap.SetAsUnsignedInteger("UserDefined-SmallSize", 5)
sample = ot.Sample(40, 3)
for i in range(4):
    for j in range(3):
        sample[i, j] = 10 * (i // 3 + 1) + 0.1 * (j + 1)

multivariateUserDefined = ot.UserDefined(sample)
print("Multivariate UserDefined=", multivariateUserDefined)
print("Marginal 0=", multivariateUserDefined.getMarginal(0))
indices = ot.Indices(2)
indices[0] = 2
indices[1] = 0
print("Marginal (2, 0)=", multivariateUserDefined.getMarginal(indices))
# With the model, we want to compare values:
# we compare the computed values with the theoretical values
plotSample = ot.Sample(frequencyGrid.getN(), 3)

# Comparison loop ==> data are saved in plotSample
for k in range(frequencyGrid.getN()):
    freq = frequencyGrid.getStart() + k * frequencyGrid.getStep()
    plotSample[k, 0] = freq
    plotSample[k, 1] = abs(myEstimatedModel_PS(freq)[0, 0])
    plotSample[k, 2] = abs(model(freq)[0, 0])

# Graph section
# We build 2 curves, each one a function of the frequency values
ind = ot.Indices(2)
ind.fill()

# Some cosmetics: labels, legend position, ...
graph = ot.Graph("Spectral model estimation", "Frequency",
                 "Spectral density function", True, "topright", 1.0,
                 ot.GraphImplementation.LOGY)

# The first curve is the estimated density as a function of frequency
curve1 = ot.Curve(plotSample.getMarginal(ind))
curve1.setColor('blue')
curve1.setLegend('estimated model')

# The second curve is the theoretical density as a function of frequency
ind[1] = 2
curve2 = ot.Curve(plotSample.getMarginal(ind))
curve2.setColor('red')
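# Hedged completion: the natural next steps label the second curve and add
# both curves to the graph created above.
curve2.setLegend('theoretical model')
graph.add(curve1)
graph.add(curve2)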
val1 = ref_point2[0]
val2 = ref_point2[1]
print("point2[0] = %.6f" % val1)
print("point2[1] = %.6f" % val2)

# Copy constructor
point3 = ot.Point(ref_point1)
ref_point3 = point3
val1 = ref_point3[0]
val2 = ref_point3[1]
print("point3[0] = %.6f" % val1)
print("point3[1] = %.6f" % val2)

# Constructor from indices
indices = ot.Indices(5)
indices.fill()
point = ot.Point(indices)
print("point=", point)

# Constructor from python sequence
point4 = ot.Point((1000., 1001.))
i = 0
for coord in point4:
    print("point4[%d] = %.6f" % (i, coord))
    i += 1

# Comparison operator
if ref_point2 != ref_point2:
    print("OT::Base::Type::Point.operator == does NOT return the correct "
          "value. Says that point2 and point2 are DIFFERENT though they "
          "are equal.")
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View

# Create a process X: R^2 --> R^2

# Define a bidimensional mesh as a box
myIndices = ot.Indices([40, 20])
myMesher = ot.IntervalMesher(myIndices)
lowerBound = [0.0, 0.0]
upperBound = [2.0, 1.0]
myInterval = ot.Interval(lowerBound, upperBound)
myMesh = myMesher.build(myInterval)

# Define a scalar temporal Gaussian process on the mesh;
# this process is stationary
# myXproc R^2 --> R
amplitude = [1.0]
scale = [0.2, 0.2]
myCovModel = ot.ExponentialModel(scale, amplitude)
myXproc = ot.GaussianProcess(myCovModel, myMesh)

# Transform myXproc to make its variance depend on the vertex (s,t)
# and to get a positive process
# thanks to the spatial function g
# myXtProcess R --> R
g = ot.SymbolicFunction(['x1'], ['exp(x1)'])
myDynTransform = ot.ValueFunction(g, myMesh)
myXtProcess = ot.CompositeProcess(myDynTransform, myXproc)

myField = myXtProcess.getRealization()
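# Hedged continuation: visualize one marginal of the realization with the
# viewer imported at the top of this snippet.
graph = myField.drawMarginal(0)
View(graph)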