def __toBaseDataFormat__(self, data, idx):
    mesh = self.__Meshes__[idx]
    if isinstance(data, ot.Point):
        if mesh is not None:
            dataBaseFormat = ot.Field(mesh, [[dat] for dat in data])
            return dataBaseFormat
        else:
            return data
    elif isinstance(data, ot.Interval):
        lowBounds = data.getLowerBound()
        upperBounds = data.getUpperBound()
        if mesh is not None:
            lowBoundsBaseFormat = ot.Field(mesh, [[bnd] for bnd in lowBounds])
            upperBoundsBaseFormat = ot.Field(mesh, [[bnd] for bnd in upperBounds])
            return lowBoundsBaseFormat, upperBoundsBaseFormat
        else:
            return data
    elif isinstance(data, ot.Distribution):
        print('Cannot convert distribution to field, dimensional correlation lost.')
        return data
    elif isinstance(data, (bool, int, float)):
        return data
    else:
        raise NotImplementedError
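# A minimal standalone sketch of the Point-to-Field conversion pattern used in
# __toBaseDataFormat__ above: each scalar component of the ot.Point becomes a
# 1-d value attached to one vertex of the mesh. The mesh and values here are
# purely illustrative.
import openturns as ot

mesh = ot.RegularGrid(0.0, 1.0, 3)             # a mesh with 3 vertices
point = ot.Point([1.0, 2.0, 3.0])              # one scalar per vertex
fieldFromPoint = ot.Field(mesh, [[p] for p in point])
print(fieldFromPoint.getValues())              # [[1], [2], [3]]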
def myPyFunc(X):
    mesh = X.getMesh()
    values = X.getValues() * ([2.0] * X.getValues().getDimension())
    values.setDescription(ot.Description.BuildDefault(values.getDimension(), "Y"))
    Y = ot.Field(mesh, values)
    return Y
def _exec(self, X):
    inputTG = X.getTimeGrid()
    inputValues = X.getValues()
    f = ot.NumericalMathFunction(ot.PiecewiseLinearEvaluationImplementation(
        [x[0] for x in inputTG.getVertices()], inputValues))
    outputValues = ot.NumericalSample(0, 1)
    for t in self.outputGrid_.getVertices():
        kernel = ot.Normal(t[0], 0.05)

        def pdf(X):
            return [kernel.computePDF(X)]

        weight = ot.NumericalMathFunction(ot.PythonFunction(1, 1, pdf))
        outputValues.add(self.algo_.integrate(weight * f, kernel.getRange()))
    return ot.Field(self.outputGrid_, outputValues)
def readProcessSample(fname):
    """
    Return a ProcessSample from a text file.
    Assume the mesh is regular [0,1].
    """
    # Dataset
    data = np.loadtxt(fname)

    # Create the mesh
    n_nodes = data.shape[1]
    mesher = ot.IntervalMesher([n_nodes - 1])
    interval = ot.Interval([0.0], [1.0])
    mesh = mesher.build(interval)

    # Create the ProcessSample from the data
    n_fields = data.shape[0]
    dim_fields = 1
    processSample = ot.ProcessSample(mesh, n_fields, dim_fields)
    for i in range(n_fields):
        trajectory = ot.Sample([[x] for x in data[i, :]])
        processSample[i] = ot.Field(mesh, trajectory)
    return processSample
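# A hypothetical usage sketch for readProcessSample, assuming a
# whitespace-separated text file in which each row is one trajectory sampled
# on a regular grid over [0, 1]. The file name and data are illustrative.
import numpy as np

np.savetxt('trajectories.dat', np.random.rand(5, 12))  # 5 fields, 12 nodes each
sample = readProcessSample('trajectories.dat')
print(sample.getSize())                      # 5
print(sample.getMesh().getVerticesNumber())  # 12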
def dummyFunction2Wrap(field_10x10, field_100x1, scalar_0):
    # Function doing some operation on the 2 fields and a scalar, returning a field
    outDim = 1
    NElem = [10]
    mesher = ot.IntervalMesher(NElem)
    lowerBound = [0]
    upperBound = [10]
    interval = ot.Interval(lowerBound, upperBound)
    mesh = mesher.build(interval)
    outField = ot.Field(mesh, [[0]] * mesh.getVerticesNumber())
    for i in range(10):
        for j in range(10):
            if field_10x10[i][0] * field_100x1[i + j][0] > scalar_0[0][0]:
                outField.setValueAtIndex(
                    i, [field_10x10[i][0] * field_100x1[(i + 1) * (j + 1) - 1][0]
                        - scalar_0[0][0]])
            else:
                outField.setValueAtIndex(
                    i, [(field_10x10[j][0] - scalar_0[0][0])
                        / field_100x1[(i + 1) * (j + 1) - 1][0]])
    return outField
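# A minimal smoke test for dummyFunction2Wrap (a sketch, not part of the
# original tests), assuming the input shapes its name suggests: a field on
# 10 vertices, a field on 100 vertices and a 1x1 scalar sample. The meshes
# and values below are illustrative.
mesh10 = ot.IntervalMesher([9]).build(ot.Interval(0.0, 10.0))
mesh100 = ot.IntervalMesher([99]).build(ot.Interval(0.0, 10.0))
field_a = ot.Field(mesh10, ot.Normal().getSample(10))
field_b = ot.Field(mesh100, ot.Normal().getSample(100))
scalar = ot.Sample([[0.5]])
print(dummyFunction2Wrap(field_a, field_b, scalar))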
def myPyFunc(X):
    mesh = X.getMesh()
    Y = ot.Field(mesh, ot.Normal().getSample(mesh.getVerticesNumber()))
    return Y
myFunc = ot.PythonDynamicalFunction(in_dim, out_dim, spatial_dim, myPyFunc)
print('myFunc=', myFunc)

vertices = []
vertices.append([0.0, 0.0, 0.0])
vertices.append([0.0, 0.0, 1.0])
vertices.append([0.0, 1.0, 0.0])
vertices.append([0.0, 1.0, 1.0])
vertices.append([1.0, 0.0, 0.0])
vertices.append([1.0, 0.0, 1.0])
vertices.append([1.0, 1.0, 0.0])
vertices.append([1.0, 1.0, 1.0])
simplices = []
simplices.append([0, 1, 2, 4])
simplices.append([3, 5, 6, 7])
simplices.append([1, 2, 3, 6])
simplices.append([1, 2, 4, 6])
simplices.append([1, 3, 5, 6])
simplices.append([1, 4, 5, 6])
mesh3D = ot.Mesh(vertices, simplices)
values = ot.Normal(spatial_dim).getSample(mesh3D.getVerticesNumber())
X = ot.Field(mesh3D, values)
print('X=', X)
Y = myFunc(X)
print('Y=', Y)
print('myFunc input dimension=', myFunc.getInputDimension())
print('myFunc output dimension=', myFunc.getOutputDimension())
print('myFunc spatial dimension=', myFunc.getSpatialDimension())
print('called ', myFunc.getCallsNumber(), ' times')
# Get the number of calls
print("called ", myFunc.getCallsNumber(), " times")

# 2-d mesh
n = 5
indices = [n, n]
intervalMesher = ot.IntervalMesher(indices)
interval = ot.Interval([0.0] * 2, [1.0] * 2)
mesh2D = intervalMesher.build(interval)


def f2Pfunc(X):
    Y = ot.Sample(X).computeMean()
    return Y


field2PFunction = ot.PythonFieldToPointFunction(mesh2D, 1, 1, f2Pfunc)
fieldFunction = ot.ValueFunction(ot.SymbolicFunction(["x", "y"], ["3x"]), mesh2D)
myFunc = ot.FieldToPointConnection(field2PFunction, fieldFunction)
print("myFunc=", myFunc)
# Get the input and output description
print("myFunc input description=", myFunc.getInputDescription())
print("myFunc output description=", myFunc.getOutputDescription())
# Get the input and output dimension
print("myFunc input dimension=", myFunc.getInputDimension())
print("myFunc output dimension=", myFunc.getOutputDimension())
field = ot.Field(mesh2D, ot.Normal(2).getSample(mesh2D.getVerticesNumber()))
print("myFunc(field)=", myFunc(field))
print("called ", myFunc.getCallsNumber(), " times")
vertices.append([1.0, 1.0, 0.0])
vertices.append([1.0, 1.0, 1.0])
simplices = []
simplices.append([0, 1, 2, 4])
simplices.append([3, 5, 6, 7])
simplices.append([1, 2, 3, 6])
simplices.append([1, 2, 4, 6])
simplices.append([1, 3, 5, 6])
simplices.append([1, 4, 5, 6])
mesh3D = ot.Mesh(vertices, simplices)
s = 3
values = ot.Normal(s).getSample(mesh3D.getVerticesNumber())
field = ot.Field(mesh3D, values)
tree = ot.KDTree(vertices)
print('field=', field)
print('input dim=', field.getInputDimension())
print('value[4]=', field.getValueAtIndex(4))
print('value[4, 0]=%.6g' % field[4, 0])
print('nearest[2]=', field.getValueAtIndex(tree.query(field[2])))
print('mesh=', field.getMesh())
print('input mean=', field.getInputMean())
print('deformed=', field.asDeformedMesh())
print('description=', field.getDescription())
fname = 'field.vtk'
field.exportToVTKFile(fname)
with open(fname) as f:
    data = f.read()
def test_ProcessHighDensityRegionAlgorithm(mock_show):
    ot.RandomGenerator.SetSeed(0)
    numberOfPointsForSampling = 500
    ot.ResourceMap.Set('Distribution-MinimumVolumeLevelSetBySampling', 'true')
    ot.ResourceMap.Set('Distribution-MinimumVolumeLevelSetSamplingSize',
                       str(numberOfPointsForSampling))

    # Dataset
    fname = os.path.join(os.path.dirname(__file__), 'data', 'npfda-elnino.dat')
    data = np.loadtxt(fname)

    # Create the mesh
    n_nodes = data.shape[1]
    mesher = ot.IntervalMesher([n_nodes - 1])
    interval = ot.Interval([0.0], [1.0])
    mesh = mesher.build(interval)

    # Create the ProcessSample from the data
    n_fields = data.shape[0]
    dim_fields = 1
    sample = ot.ProcessSample(mesh, n_fields, dim_fields)
    for i in range(n_fields):
        trajectory = ot.Sample(data[i, :], 1)
        sample[i] = ot.Field(mesh, trajectory)

    # Compute HDRPlot
    hdr = ProcessHighDensityRegionAlgorithm(sample)
    hdr.setContoursAlpha([0.8, 0.5])
    hdr.setOutlierAlpha(0.8)
    hdr.run()
    hdr.summary()
    hdr.dimensionReductionSummary()

    # Plot ACP
    graph = hdr.drawDimensionReduction()
    View(graph)
    plt.show()

    # Plot Density
    fig, axs, graphs = hdr.drawDensity()
    plt.show()

    # Plot outlier trajectories
    graph = hdr.drawOutlierTrajectories(drawInliers=True, discreteMean=True)
    View(graph)
    plt.show()

    graph = hdr.drawOutlierTrajectories(bounds=False)
    View(graph)
    plt.show()

    outlier_indices = hdr.computeOutlierIndices()
    expected_outlier_indices = [3, 7, 22, 32, 33, 41, 47]
    assert_equal(outlier_indices, expected_outlier_indices)

    # Check data
    assert_equal(hdr.getNumberOfTrajectories(), 54)
    assert_equal(hdr.getNumberOfVertices(), 12)
    assert_equal(hdr.getNumberOfComponents(), 2)
    assert_array_almost_equal(hdr.getPartOfExplainedVariance(), 0.86569783, 4)
    assert_array_almost_equal(hdr.getExplainedVarianceRatio(),
                              [0.60759627, 0.25810156], 4)

    # Check higher dimension
    hdr = ProcessHighDensityRegionAlgorithm(sample, numberOfComponents=3)
    hdr.setOutlierAlpha(0.6)
    hdr.run()
    fig, axs, graphs = hdr.drawDensity()
    plt.show()
    fig, axs, graphs = hdr.drawDensity(drawData=True)
    plt.show()
inSample, weights = weightedExperiment.generateWithWeights()
print("Sample model")
t0 = time()
outSample = model(inSample)
t1 = time()
print("t=", t1 - t0, "s, speed=", inSample.getSize() / (t1 - t0), "evals/s")
basis = ot.OrthogonalProductPolynomialFactory([ot.HermiteFactory()] * dim)
adaptive = ot.FixedStrategy(basis, dim + 1)
projection = ot.LeastSquaresStrategy(weightedExperiment)
algo = ot.FunctionalChaosAlgorithm(inSample, outSample, distribution,
                                   adaptive, projection)
algo.run()
vector = ot.FunctionalChaosRandomVector(algo.getResult())
# Field of Sobol indices
# for i in range(dim):
for i in range(15, 16):
    print("i=", i)
    sobol = [vector.getSobolIndex(i, j)
             for j in range(mesh.getVerticesNumber())]
    field = ot.Field(mesh, [[x] for x in sobol])
    graph = field.draw()
    graph.setTitle("Sobol index field - component " + str(i))
    # graph.add(field.drawMarginal(0))
    view = View(graph, (800, 600))
    view._ax[0].axis("equal")
    view.save("../plot_sobol_field_" + str(i).zfill(4) + ".png")
    view.close()
def test_ProcessHighDensityRegionAlgorithm(mock_show):
    ot.RandomGenerator.SetSeed(0)
    numberOfPointsForSampling = 500
    ot.ResourceMap.Set('Distribution-MinimumVolumeLevelSetBySampling', 'true')
    ot.ResourceMap.Set('Distribution-MinimumVolumeLevelSetSamplingSize',
                       str(numberOfPointsForSampling))

    fname = os.path.join(os.path.dirname(__file__), 'data', 'npfda-elnino.dat')
    dataR = np.loadtxt(fname)

    # Create the mesh
    numberOfNodes = dataR.shape[1]
    myMesher = ot.IntervalMesher([numberOfNodes - 1])
    myInterval = ot.Interval([0.0], [1.0])
    myMesh = myMesher.build(myInterval)

    # Create the ProcessSample from the data
    numberOfFields = dataR.shape[0]
    dimensionOfFields = 1
    myps = ot.ProcessSample(myMesh, numberOfFields, dimensionOfFields)
    for i in range(numberOfFields):
        thisTrajectory = ot.Sample(dataR[i, :], 1)
        myps[i] = ot.Field(myMesh, thisTrajectory)

    # Compute HDRPlot
    myhdrplot = ProcessHighDensityRegionAlgorithm(myps)
    myhdrplot.setContoursAlpha([0.8, 0.5])
    myhdrplot.setOutlierAlpha(0.8)
    myhdrplot.run()
    myhdrplot.summary()
    myhdrplot.dimensionReductionSummary()

    # Plot ACP
    myhdrplot.plotDimensionReduction()
    plt.show()

    # Plot Density
    plotData = True
    plotOutliers = True
    myhdrplot.plotDensity(plotData, plotOutliers)
    plt.show()

    # Plot trajectories
    myhdrplot.plotTrajectories()
    plt.show()

    # Plot outlier trajectories
    myhdrplot.plotOutlierTrajectories()
    plt.show()

    outlierIndices = myhdrplot.computeOutlierIndices()
    expected_outlierIndices = [3, 7, 22, 32, 33, 41, 47]
    assert_equal(outlierIndices, expected_outlierIndices)

    # Check data
    assert_equal(myhdrplot.getNumberOfTrajectories(), 54)
    assert_equal(myhdrplot.getNumberOfVertices(), 12)
    assert_equal(myhdrplot.getNumberOfComponents(), 2)
    assert_array_almost_equal(myhdrplot.getPartOfExplainedVariance(),
                              0.86569783, 4)
    assert_array_almost_equal(myhdrplot.getExplainedVarianceRatio(),
                              [0.60759627, 0.25810156], 4)
algo_kl_process_1D = ot.KarhunenLoeveP1Algorithm(
    mesh_1D, process_1D.getCovarianceModel())
algo_kl_process_1D.run()
kl_results_1D = algo_kl_process_1D.getResult()

algo_kl_process_2D = ot.KarhunenLoeveP1Algorithm(
    mesh_2D, process_2D.getCovarianceModel())
algo_kl_process_2D.run()
kl_results_2D = algo_kl_process_2D.getResult()

### Now let's compose our Karhunen-Loeve results and our distributions.
composedKLResultsAndDistributions = aklr.AggregatedKarhunenLoeveResults(
    [kl_results_2D, kl_results_1D, scalar_distribution])

### Now let's see if we manage to project and lift the realizations we had before.
realizationFields = [field_2D, field_1D, ot.Field(ot.Mesh(), [scalar_0[0]])]
projectedCoeffs = composedKLResultsAndDistributions.project(realizationFields)
print('Projected coefficients are :', projectedCoeffs)
liftedFieldsO = composedKLResultsAndDistributions.liftAsField(projectedCoeffs)
print('Lifted fields are :', liftedFieldsO)

### Now let's use our function wrapper and see if we get the same results!
dummyWrapper = klgfw.KarhunenLoeveGeneralizedFunctionWrapper(
    composedKLResultsAndDistributions, dummyFunction2Wrap, None, 1)
print('testing call:')
dummyWrapper(projectedCoeffs)


class TestComposeAndWrap(unittest.TestCase):
    def testLiftAndProject(self,
    def _exec(self, X):
        Xs = ot.Sample(X)
        Y = Xs * ([2.0] * Xs.getDimension())
        return Y

    def isActingPointwise(self):
        return True


F = FUNC()
print('in_dim=' + str(F.getInputDimension())
      + ' out_dim=' + str(F.getOutputDimension())
      + ' spatial_dim=' + str(F.getInputMesh().getDimension()))
X = ot.Field(mesh, ot.Normal(2).getSample(11))
print(F(X.getValues()))
Xsample = ot.ProcessSample(5, X)
print(F(Xsample))
# Instance creation
myFunc = ot.FieldFunction(F)
# Copy constructor
newFunc = ot.FieldFunction(myFunc)
print('myFunc input dimension= ' + str(myFunc.getInputDimension()))
print('myFunc output dimension= ' + str(myFunc.getOutputDimension()))
print(myFunc(X.getValues()))
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
from math import sqrt

mesh = ot.IntervalMesher([256]).build(ot.Interval(-1.0, 1.0))
threshold = 0.001
factory = ot.KarhunenLoeveP1Factory(mesh, threshold)
model = ot.AbsoluteExponential(1, 1.0)
ev = ot.NumericalPoint()
modes = factory.buildAsProcessSample(model, ev)
for i in range(modes.getSize()):
    modes[i] = ot.Field(mesh, modes[i].getValues() * [sqrt(ev[i])])
g = modes.drawMarginal(0)
g.setXTitle("$t$")
g.setYTitle(r"$\sqrt{\lambda_n}\phi_n$")
fig = plt.figure(figsize=(6, 4))
plt.suptitle("P1 approx. of KL expansion for $C(s,t)=e^{-|s-t|}$")
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
View(g, figure=fig, axes=[axis], add_legend=False)
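# Note: ot.KarhunenLoeveP1Factory belongs to the old OpenTURNS 1.x API; newer
# releases expose the same P1 computation through ot.KarhunenLoeveP1Algorithm,
# as used elsewhere in this section. A hedged, self-contained sketch with the
# current API (names and threshold chosen to mirror the example above):
mesh_new = ot.IntervalMesher([256]).build(ot.Interval(-1.0, 1.0))
algo = ot.KarhunenLoeveP1Algorithm(mesh_new, ot.AbsoluteExponential([1.0]), 0.001)
algo.run()
result = algo.getResult()
print(result.getEigenvalues())  # spelled getEigenValues() in some older releases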
#! /usr/bin/env python

import openturns as ot
import openturns.testing as ott

mesh = ot.RegularGrid(0.0, 1.0, 4)
values = ot.Sample([[0.5], [1.5], [1.0], [-0.5]])
field = ot.Field(mesh, values)
evaluation = ot.P1LagrangeEvaluation(field)
x = [2.3]
y = evaluation(x)
print(y)
ott.assert_almost_equal(y, [0.55])

# Learning sample on meshD
mesher = ot.IntervalMesher([7, 7])
lowerBound = [-1.0, -1.0]
upperBound = [1.0, 1.0]
interval = ot.Interval(lowerBound, upperBound)
meshD = mesher.build(interval)
sample = ot.ProcessSample(meshD, 10, 1)
field = ot.Field(meshD, 1)
for k in range(sample.getSize()):
    field.setValues(ot.Normal().getSample(64))
    sample[k] = field
lagrange = ot.P1LagrangeEvaluation(sample)

# New mesh
mesh = ot.Mesh(ot.MonteCarloExperiment(
    ot.ComposedDistribution([ot.Uniform(-1.0, 1.0)] * 2), 200).generate())
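# A hedged continuation: the interpolator built from the process sample can be
# evaluated anywhere in the input domain, for instance at a vertex of the new
# Monte Carlo mesh; printing the dimensions shows how the evaluation stacks
# the fields of the learning sample.
print('input dimension:', lagrange.getInputDimension())
print('output dimension:', lagrange.getOutputDimension())
y = lagrange(mesh.getVertices()[0])
print(y)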
        # first argument:
        super(FUNC, self).__init__(2, 2, 1)
        self.setInputDescription(['R', 'S'])
        self.setOutputDescription(['T', 'U'])

    def _exec(self, X):
        Y = X.getValues().computeMean()
        return Y


F = FUNC()
print('in_dim=' + str(F.getInputDimension())
      + ' out_dim=' + str(F.getOutputDimension())
      + ' spatial_dim=' + str(F.getSpatialDimension()))
X = ot.Field(ot.RegularGrid(0.0, 0.1, 11), ot.Normal(2).getSample(11))
print(F(X))
Xsample = ot.ProcessSample(5, X)
print(F(Xsample))
# Instance creation
myFunc = ot.FieldToPointFunction(F)
# Copy constructor
newFunc = ot.FieldToPointFunction(myFunc)
print('myFunc input dimension= ' + str(myFunc.getInputDimension()))
print('myFunc output dimension= ' + str(myFunc.getOutputDimension()))
print(myFunc(X))
phi_func = ot.PythonFunction(2, 2, flow)
phi = ot.ValueFunction(phi_func)
solver = ot.Fehlberg(phi)
initialState = [2.0, 2.0]
nt = 47
dt = 0.1
timeGrid = ot.RegularGrid(0.0, dt, nt)
result = solver.solve(initialState, timeGrid)
xMin = result.getMin()
xMax = result.getMax()
delta = 0.2 * (xMax - xMin)
mesh = ot.IntervalMesher([12] * 2).build(
    ot.Interval(xMin - delta, xMax + delta))
field = ot.Field(mesh, phi_func(mesh.getVertices()))
ot.ResourceMap.SetAsScalar("Field-ArrowScaling", 0.1)
graph = field.draw()
cloud = ot.Cloud(mesh.getVertices())
cloud.setColor("black")
graph.add(cloud)
curve = ot.Curve(result)
curve.setColor("red")
curve.setLineWidth(2)
graph.add(curve)
fig = plt.figure()
ax = fig.add_subplot(111)
View(graph, figure=fig)
plt.suptitle("Lotka-Volterra ODE system")
plt.xlabel(r'$y_0$')
#! /usr/bin/env python

import openturns as ot

ot.TESTPREAMBLE()

# A 1D->1D field
mesh = ot.IntervalMesher([10]).build(ot.Interval(-2.0, 2.0))
function = ot.SymbolicFunction("x", "x")
field = ot.Field(mesh, function(mesh.getVertices()))
graph = field.draw()
graph = field.drawMarginal(0, False)
graph = field.drawMarginal(0, True)

# A 2D->1D field
mesh = ot.IntervalMesher([10] * 2).build(ot.Interval([-2.0] * 2, [2.0] * 2))
function = ot.SymbolicFunction(["x0", "x1"], ["x0 - x1"])
field = ot.Field(mesh, function(mesh.getVertices()))
graph = field.draw()
graph = field.drawMarginal(0, False)
graph = field.drawMarginal(0, True)

# A 2D->2D field
function = ot.SymbolicFunction(["x0", "x1"], ["x0", "x1"])
field = ot.Field(mesh, function(mesh.getVertices()))
graph = field.draw()
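# Any of the graphs built above can be rendered or saved with the viewer.
# A short sketch (the file name is illustrative):
from openturns.viewer import View

view = View(graph)
view.save('field_2d_2d.png')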
class AggregatedKarhunenLoeveResults(object):
    '''Class allowing us to aggregate scalar distributions and stochastic processes.

    Thanks to the Karhunen-Loève expansion we can consider a process with a
    given covariance model as a vector of random variables following a
    centered normal law.

    By stacking the scalar distributions and the vectors representative of the
    fields we obtain a unique vector representation of our aggregate. It links
    a non-homogeneous ensemble of fields and scalars to a unique vector of
    scalars.
    '''
    def __init__(self, composedKLResultsAndDistributions):
        '''Initializes the aggregation.

        Parameters
        ----------
        composedKLResultsAndDistributions : list
            ordered list of ot.Distribution and ot.KarhunenLoeveResult objects
        '''
        # KLRL : Karhunen Loeve Result List
        self.__KLResultsAndDistributions__ = atLeastList(composedKLResultsAndDistributions)
        assert len(self.__KLResultsAndDistributions__) > 0
        self.__field_distribution_count__ = len(self.__KLResultsAndDistributions__)
        self.__name__ = 'Unnamed'
        self.__KL_lifting__ = []
        self.__KL_projecting__ = []

        # Flags
        self.__isProcess__ = [False] * self.__field_distribution_count__
        self.__has_distributions__ = False
        self.__unified_dimension__ = False
        self.__unified_mesh__ = False
        self.__isAggregated__ = False
        self.__means__ = [0.0] * self.__field_distribution_count__
        self.__liftWithMean__ = False

        # Checking the nature of each element of the input list
        for i in range(self.__field_distribution_count__):
            # If the element is a Karhunen-Loeve decomposition
            if isinstance(self.__KLResultsAndDistributions__[i], ot.KarhunenLoeveResult):
                # Initializing lifting and projecting objects.
                self.__KL_lifting__.append(
                    ot.KarhunenLoeveLifting(self.__KLResultsAndDistributions__[i]))
                self.__KL_projecting__.append(
                    ot.KarhunenLoeveProjection(self.__KLResultsAndDistributions__[i]))
                self.__isProcess__[i] = True
            # If the element is a distribution
            elif isinstance(self.__KLResultsAndDistributions__[i],
                            (ot.Distribution, ot.DistributionImplementation)):
                self.__has_distributions__ = True
                if self.__KLResultsAndDistributions__[i].getMean()[0] != 0:
                    print('The mean value of distribution {} at index {} of type {} is not 0.'.format(
                        '"' + self.__KLResultsAndDistributions__[i].getName() + '"',
                        str(i),
                        self.__KLResultsAndDistributions__[i].getClassName()))
                    name_distr = self.__KLResultsAndDistributions__[i].getName()
                    self.__means__[i] = self.__KLResultsAndDistributions__[i].getMean()[0]
                    self.__KLResultsAndDistributions__[i] -= self.__means__[i]
                    self.__KLResultsAndDistributions__[i].setName(name_distr)
                    print('Distribution recentered and mean added to the list of means.')
                    print('Set the "liftWithMean" flag to True if you want to include the mean.')
                # The inverse iso-probabilistic transformation is analogous to lifting
                self.__KL_lifting__.append(
                    self.__KLResultsAndDistributions__[i].getInverseIsoProbabilisticTransformation())
                # The iso-probabilistic transformation is analogous to projecting
                self.__KL_projecting__.append(
                    self.__KLResultsAndDistributions__[i].getIsoProbabilisticTransformation())

        # If the aggregation contains distributions it can't be homogeneous
        if not self.__has_distributions__:
            self.__unified_mesh__ = all_same(
                [self.__KLResultsAndDistributions__[i].getMesh()
                 for i in range(self.__field_distribution_count__)])
            self.__unified_dimension__ = (
                all_same([self.__KLResultsAndDistributions__[i].getCovarianceModel().getOutputDimension()
                          for i in range(self.__field_distribution_count__)])
                and
                all_same([self.__KLResultsAndDistributions__[i].getCovarianceModel().getInputDimension()
                          for i in range(self.__field_distribution_count__)]))

        # If only one object is passed it has to be a decomposed aggregated process
        if self.__field_distribution_count__ == 1:
            if hasattr(self.__KLResultsAndDistributions__[0], 'getCovarianceModel') \
                    and hasattr(self.__KLResultsAndDistributions__[0], 'getMesh'):
                # Because aggregated processes use multivariate covariance functions
                self.__isAggregated__ = (
                    self.__KLResultsAndDistributions__[0].getCovarianceModel().getOutputDimension()
                    > self.__KLResultsAndDistributions__[0].getMesh().getDimension())
                print('Process seems to be aggregated.')
            else:
                print('There is no point in passing only one process that is not aggregated')
                raise TypeError

        self.threshold = max(
            [self.__KLResultsAndDistributions__[i].getThreshold()
             if hasattr(self.__KLResultsAndDistributions__[i], 'getThreshold') else 1e-3
             for i in range(self.__field_distribution_count__)])
        # Now gather the data we will usually need
        self.__process_distribution_description__ = [
            self.__KLResultsAndDistributions__[i].getName()
            for i in range(self.__field_distribution_count__)]
        self._checkSubNames()
        self.__mode_count__ = [
            self.__KLResultsAndDistributions__[i].getEigenValues().getSize()
            if hasattr(self.__KLResultsAndDistributions__[i], 'getEigenValues') else 1
            for i in range(self.__field_distribution_count__)]
        self.__mode_description__ = self._getModeDescription()

    def __repr__(self):
        '''Visual representation of the object.
        '''
        covarianceList = self.getCovarianceModel()
        eigValList = self.getEigenValues()
        meshList = self.getMesh()
        reprStr = '| '.join(
            ['class = AggregatedKarhunenLoeveResults',
             'name = {}'.format(self.getName()),
             'Aggregation Order = {}'.format(str(self.__field_distribution_count__)),
             'Threshold = {}'.format(str(self.threshold)),
             *['Covariance Model {} = '.format(str(i)) + covarianceList[i].__repr__()
               for i in range(self.__field_distribution_count__)],
             *['Eigen Value {} = '.format(str(i)) + eigValList[i].__repr__()
               for i in range(self.__field_distribution_count__)],
             *['Mesh {} = '.format(str(i)) + meshList[i].__repr__().partition('data=')[0]
               for i in range(self.__field_distribution_count__)]])
        return reprStr

    def _checkSubNames(self):
        """Checks the names of the objects passed to see if they are all
        unique or if default ones have to be assigned."""
        if len(set(self.__process_distribution_description__)) != len(self.__process_distribution_description__):
            print('The process names are not unique.')
            print('Using generic names.')
            for i, process in enumerate(self.__KLResultsAndDistributions__):
                oldName = process.getName()
                newName = 'X_' + str(i)
                print('Old name was {}, new one is {}'.format(oldName, newName))
                process.setName(newName)
            self.__process_distribution_description__ = [
                self.__KLResultsAndDistributions__[i].getName()
                for i in range(self.__field_distribution_count__)]

    def _getModeDescription(self):
        """Returns the description of each element of the input vector.
        (The vector obtained once the processes are expanded and stacked with
        the distributions.)
        """
        modeDescription = list()
        for i, nMode in enumerate(self.__mode_count__):
            for j in range(nMode):
                modeDescription.append(
                    self.__process_distribution_description__[i] + '_' + str(j))
        return modeDescription

    def _checkCoefficients(self, coefficients):
        '''Function to check if the vector passed has the right number of elements.'''
        nModes = sum(self.__mode_count__)
        if isinstance(coefficients, ot.Point) and len(coefficients) == nModes:
            return True
        elif (isinstance(coefficients, (ot.Sample, ot.SampleImplementation))
                and len(coefficients[0]) == nModes):
            return True
        else:
            print('The vector passed does not have the right number of elements.')
            print('Number of elements: {} != {}'.format(str(len(coefficients)), str(nModes)))
            return False

    # new method
    def getMean(self, i=None):
        '''Get the mean value of the stochastic processes and the scalar distributions.

        Parameters
        ----------
        i : int
            index of distribution or process
        '''
        if i is not None:
            return self.__means__[i]
        else:
            return self.__means__

    # new method
    def setMean(self, i, val):
        '''Sets the mean of the variable at index i to a value.

        Parameters
        ----------
        i : int
            index of distribution or process
        val : float, int
            value to which we set the mean
        '''
        self.__means__[i] = val

    # new method
    def setLiftWithMean(self, theBool):
        '''Flag saying whether to add the mean to the generated values of
        fields or scalars. If not, all the events are centered.

        Parameters
        ----------
        theBool : bool
            whether to lift the distributions and processes to their
            non-homogeneous original space with their mean value, or centered
        '''
        self.__liftWithMean__ = theBool

    def getClassName(self):
        '''Returns a list of the class each process/distribution belongs to.
        '''
        classNames = [self.__KLResultsAndDistributions__[i].__class__.__name__
                      for i in range(self.__field_distribution_count__)]
        return list(set(classNames))

    def getCovarianceModel(self):
        '''Returns a list of covariance models for each process.
        '''
        return [self.__KLResultsAndDistributions__[i].getCovarianceModel()
                if hasattr(self.__KLResultsAndDistributions__[i], 'getCovarianceModel')
                else None
                for i in range(self.__field_distribution_count__)]

    def getEigenValues(self):
        '''Returns a list of the eigenvalues for each process.
        '''
        return [self.__KLResultsAndDistributions__[i].getEigenValues()
                if hasattr(self.__KLResultsAndDistributions__[i], 'getEigenValues')
                else None
                for i in range(self.__field_distribution_count__)]

    def getId(self):
        '''Returns a list containing the ID of each process/distribution.
        '''
        return [self.__KLResultsAndDistributions__[i].getId()
                for i in range(self.__field_distribution_count__)]

    def getImplementation(self):
        '''Returns a list containing the implementation of each
        process/distribution, else None.
        '''
        return [self.__KLResultsAndDistributions__[i].getImplementation()
                if hasattr(self.__KLResultsAndDistributions__[i], 'getImplementation')
                else None
                for i in range(self.__field_distribution_count__)]

    def getMesh(self):
        '''Returns a list containing the mesh of each process, or None if
        it's a distribution.
        '''
        return [self.__KLResultsAndDistributions__[i].getMesh()
                if hasattr(self.__KLResultsAndDistributions__[i], 'getMesh')
                else None
                for i in range(self.__field_distribution_count__)]

    def getModes(self):
        '''Returns a list containing the modes of each process, None if
        it's a distribution.
        '''
        return [self.__KLResultsAndDistributions__[i].getModes()
                if hasattr(self.__KLResultsAndDistributions__[i], 'getModes')
                else None
                for i in range(self.__field_distribution_count__)]

    def getModesAsProcessSample(self):
        '''Returns a list containing the modes as a process sample for each
        process in the aggregation.
        '''
        return [self.__KLResultsAndDistributions__[i].getModesAsProcessSample()
                if hasattr(self.__KLResultsAndDistributions__[i], 'getModesAsProcessSample')
                else None
                for i in range(self.__field_distribution_count__)]

    def getName(self):
        '''Returns the name of the aggregation object.
        '''
        return self.__name__

    def getProjectionMatrix(self):
        '''Returns the projection matrix for each Karhunen-Loeve
        decomposition, None if it's a distribution.
        '''
        return [self.__KLResultsAndDistributions__[i].getProjectionMatrix()
                if hasattr(self.__KLResultsAndDistributions__[i], 'getProjectionMatrix')
                else None
                for i in range(self.__field_distribution_count__)]

    def getScaledModes(self):
        '''Returns the scaled modes for each Karhunen-Loeve decomposition,
        None if it's a distribution.
        '''
        return [self.__KLResultsAndDistributions__[i].getScaledModes()
                if hasattr(self.__KLResultsAndDistributions__[i], 'getScaledModes')
                else None
                for i in range(self.__field_distribution_count__)]

    def getScaledModesAsProcessSample(self):
        '''Returns the scaled modes as a process sample for each
        Karhunen-Loeve decomposition, None if it's a distribution.
        '''
        return [self.__KLResultsAndDistributions__[i].getScaledModesAsProcessSample()
                if hasattr(self.__KLResultsAndDistributions__[i], 'getScaledModesAsProcessSample')
                else None
                for i in range(self.__field_distribution_count__)]

    def getThreshold(self):
        '''Gets the global threshold for the Karhunen-Loeve expansions
        approximation.
        '''
        return self.threshold

    def setName(self, name):
        '''Sets the name of the aggregation object.
        '''
        self.__name__ = name

    def liftAsProcessSample(self, coefficients):
        '''Function to lift a sample of coefficients into a collection of
        process samples and points.
        Parameters
        ----------
        coefficients : ot.Sample
            sample of values, following a centered normal law in general

        Returns
        -------
        processes : list
            ordered list of samples of scalars (ot.Sample) and field samples
            (ot.ProcessSample)
        '''
        assert isinstance(coefficients, (ot.Sample, ot.SampleImplementation))
        print('Lifting as process sample')
        jumpDim = 0
        processes = []
        for i in range(self.__field_distribution_count__):
            if self.__isProcess__[i]:
                if not self.__liftWithMean__:
                    processes.append(self.__KL_lifting__[i](
                        coefficients[:, jumpDim: jumpDim + self.__mode_count__[i]]))
                else:
                    processSample = self.__KL_lifting__[i](
                        coefficients[:, jumpDim: jumpDim + self.__mode_count__[i]])
                    addConstant2Iterable(processSample, self.__means__[i])
                    processes.append(processSample)
            else:
                if not self.__liftWithMean__:
                    processSample = ot.ProcessSample(ot.Mesh(), 0, 1)
                    val_sample = self.__KL_lifting__[i](
                        coefficients[:, jumpDim: jumpDim + self.__mode_count__[i]])
                    for j, value in enumerate(val_sample):
                        field = ot.Field(ot.Mesh(), 1)
                        field.setValueAtIndex(0, value)
                        processSample.add(field)
                    processes.append(processSample)
                else:
                    processSample = ot.ProcessSample(ot.Mesh(), 0, 1)
                    val_sample = self.__KL_lifting__[i](
                        coefficients[:, jumpDim: jumpDim + self.__mode_count__[i]])
                    mean = self.__means__[i]
                    for j, value in enumerate(val_sample):
                        field = ot.Field(ot.Mesh(), 1)
                        field.setValueAtIndex(0, [value[0] + mean])  # adding the mean
                        processSample.add(field)
                    processes.append(processSample)
            jumpDim += self.__mode_count__[i]
        return processes

    def liftAsField(self, coefficients):
        '''Function to lift a vector of coefficients into a list of process
        samples and points.

        Parameters
        ----------
        coefficients : ot.Point
            a vector of values, following a centered normal law in general

        Returns
        -------
        to_return : list
            ordered list of scalars (ot.Point) and fields (ot.Field)
        '''
        assert isinstance(coefficients, ot.Point), 'function only lifts points'
        valid = self._checkCoefficients(coefficients)
        print('Lifting as field')
        if valid:
            to_return = []
            jumpDim = 0
            for i in range(self.__field_distribution_count__):
                if self.__isProcess__[i]:
                    field = self.__KLResultsAndDistributions__[i].liftAsField(
                        coefficients[jumpDim: jumpDim + self.__mode_count__[i]])
                    jumpDim += self.__mode_count__[i]
                    if not self.__liftWithMean__:
                        to_return.append(field)
                    else:
                        vals = field.getValues()
                        vals += self.__means__[i]
                        field.setValues(vals)
                        to_return.append(field)
                else:
                    value = self.__KL_lifting__[i](
                        coefficients[jumpDim: jumpDim + self.__mode_count__[i]])
                    jumpDim += self.__mode_count__[i]
                    if not self.__liftWithMean__:
                        field = ot.Field(ot.Mesh(), 1)
                        field.setValueAtIndex(0, value)
                        to_return.append(field)
                    else:
                        field = ot.Field(ot.Mesh(), 1)
                        value[0] += self.__means__[i]
                        field.setValueAtIndex(0, value)
                        to_return.append(field)
            return to_return
        else:
            raise Exception('DimensionError: the vector of coefficients has the wrong shape')
    def _convert_exec_ot(self, output):
        """Converts the output of the function passed to the class into a
        basic openturns object, and makes some checks on the dimensions.

        Note
        ----
        If the checks fail, the output can still be found under
        self.__output_backup__
        """
        print('''Using the single evaluation function. Assumes that the outputs
are in the same order as for the batch evaluation function. This one should
only return Points, Fields, Lists or numpy arrays.''')
        outputList = []
        if len(output) != len(self._outputDescription):
            self.__nOutputs__ = len(output)
            self.setOutputDescription(
                ot.Description.BuildDefault(self.__nOutputs__, 'Y_'))
            print("shapes mismatched")
        for i, element in enumerate(output):
            if isinstance(element, (ot.Point, ot.Field)):
                element.setName(self._outputDescription[i])
                outputList.append(element)
                try:
                    dim = element.getDimension()
                except AttributeError:
                    dim = element.getMesh().getDimension()
                print('Element {} of the output tuple returns elements of type {} of dimension {}'
                      .format(i, element.__class__.__name__, dim))
            elif isinstance(element, (Sequence, Iterable)):
                intermElem = CustomList(element)
                shape = intermElem.shape
                dtype = intermElem.dtype
                assert dtype is not None, 'If None the list is not homogeneous'
                if isinstance(dtype(), (Complex, Integral, Real, Rational, Number, str)):
                    intermElem.recurse2list()
                    if len(shape) >= 2:
                        print('Element {} of the output tuple returns fields of dimension {}'
                              .format(i, len(shape)))
                        intermElem.flatten()
                        element = ot.Field(
                            self._buildMesh(self._getGridShape(shape)),
                            [[elem] for elem in intermElem])
                        element.setName(self._outputDescription[i])
                        outputList.append(element)
                    if len(shape) == 1:
                        print('Element {} of the output tuple returns points of dimension {}'
                              .format(i, shape[0]))
                        intermElem.recurse2list()
                        intermElem.flatten()
                        element = ot.Point(intermElem)
                        element.setName(self._outputDescription[i])
                        outputList.append(element)
                else:
                    print('Do not use non-numerical dtypes in your objects')
                    print('Wrong dtype is:', dtype.__name__)
            elif isinstance(element, (Complex, Integral, Real, Rational, Number, str)):
                print('Element {} of the output tuple returns a unique {}'
                      .format(i, type(element).__name__))
                outputList.append(element)
            elif isinstance(element, (ot.Sample, ot.ProcessSample)):
                print('ot.Sample and ot.ProcessSample objects must only be '
                      'returned by the _exec_sample function!')
                raise TypeError
            else:
                print('Element is {} of type {}'.format(element, type(element).__name__))
                raise NotImplementedError
        return outputList
def _exec(self, X):
    Y = ot.Field(X.getMesh(),
                 X.getValues() * ([2.0] * X.getValues().getDimension()))
    return Y
f = ot.PythonFunction(3, 2, flow)
phi = ot.ParametricFunction(f, [2], [0.0])
solver = ot.RungeKutta(phi)
initialState = [2.0, 2.0]
nt = 47
dt = 0.1
timeGrid = ot.RegularGrid(0.0, dt, nt)
result = solver.solve(initialState, timeGrid)
xMin = result.getMin()
xMax = result.getMax()
delta = 0.2 * (xMax - xMin)
mesh = ot.IntervalMesher([12] * 2).build(
    ot.Interval(xMin - delta, xMax + delta))
field = ot.Field(mesh, phi(mesh.getVertices()))
ot.ResourceMap.SetAsScalar("Field-ArrowScaling", 0.1)
graph = field.draw()
cloud = ot.Cloud(mesh.getVertices())
cloud.setColor("black")
graph.add(cloud)
curve = ot.Curve(result)
curve.setColor("red")
curve.setLineWidth(2)
graph.add(curve)
fig = plt.figure()
ax = fig.add_subplot(111)
View(graph, figure=fig)
plt.suptitle("Lotka-Volterra ODE system")
plt.xlabel(r'$y_0$')
    def _convert_exec_sample_ot(self, output):
        """Converts the output of the batch function passed to the class into
        a basic openturns object, and makes some checks on the dimensions.

        Note
        ----
        If the checks fail, the output can still be found under
        self.__output_backup__
        """
        print('''Using the batch evaluation function. Assumes that the outputs
are in the same order as for the single evaluation function. This one should
only return ProcessSamples, Samples, Lists or numpy arrays.''')
        outputList = []
        if len(output) != len(self._outputDescription):
            self.__nOutputs__ = len(output)
            self.setOutputDescription(
                ot.Description.BuildDefault(self.__nOutputs__, 'Y_'))
        for i, element in enumerate(output):
            if isinstance(element, (ot.Sample, ot.ProcessSample)):
                element.setName(self._outputDescription[i])
                outputList.append(element)
                print('Element {} of the output tuple returns elements of type {} of dimension {}'
                      .format(i, element.__class__.__name__, element.getDimension()))
            elif isinstance(element, (Sequence, Iterable)):
                print('Element is iterable, assumes that the first dimension is the sample size')
                intermElem = CustomList(element)
                intermElem.recurse2list()
                shape = intermElem.shape
                dtype = intermElem.dtype
                print('Shape is {} and dtype is {}'.format(shape, dtype))
                sampleSize = shape[0]
                subSample = [CustomList(intermElem[j]) for j in range(sampleSize)]
                assert dtype is not None, 'If None the list is not homogeneous'
                if isinstance(dtype(), (Complex, Integral, Real, Rational, Number, str)):
                    if len(shape) >= 2:
                        print('Element {} of the output tuple returns process samples of dimension {}'
                              .format(i, len(shape) - 1))
                        mesh = self._buildMesh(self._getGridShape(shape[1:]))
                        subSample = [subSample[j].flatten() for j in range(sampleSize)]
                        procsample = ot.ProcessSample(mesh, 0, len(shape) - 1)
                        for j in range(sampleSize):
                            procsample.add(
                                ot.Field(mesh, [[elem] for elem in subSample[j].data]))
                        procsample.setName(self._outputDescription[i])
                        outputList.append(procsample)
                    elif len(shape) == 1:
                        print('Element {} of the output tuple returns samples of dimension 1'
                              .format(i))
                        element = ot.Sample([[dat] for dat in intermElem.data])
                        element.setName(self._outputDescription[i])
                        outputList.append(element)
                else:
                    print('Do not use non-numerical dtypes in your objects')
                    print('Wrong dtype is:', dtype.__name__)
            elif isinstance(element, ot.Point):
                print('Element {} of the output tuple returns samples of dimension 1'
                      .format(i))
                element = ot.Sample([[element[j]] for j in range(len(element))])
                element.setName(self._outputDescription[i])
                outputList.append(element)
            elif isinstance(element, ot.Field):
                print('ot.Point and ot.Field objects must only be returned '
                      'by the _exec function!')
                raise TypeError
            else:
                print('Element is {} of type {}'.format(element, element.__class__.__name__))
                raise NotImplementedError
        return outputList
# %%
# First, we define a regular 2-d mesh
discretization = [10, 5]
mesher = ot.IntervalMesher(discretization)
lowerBound = [0.0, 0.0]
upperBound = [2.0, 1.0]
interval = ot.Interval(lowerBound, upperBound)
mesh = mesher.build(interval)
graph = mesh.draw()
graph.setTitle('Regular 2-d mesh')
view = viewer.View(graph)

# %%
# We now create a field from a mesh and some values
values = ot.Normal([0.0] * 2, [1.0] * 2,
                   ot.CorrelationMatrix(2)).getSample(len(mesh.getVertices()))
for i in range(len(values)):
    x = values[i]
    values[i] = 0.05 * x / x.norm()
field = ot.Field(mesh, values)

# %%
# We can export the `field` to a VTK file. It can be
# read later with an external program such as Paraview.
field.exportToVTKFile('field.vtk')

# %%
# Display figures
plt.show()
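# %%
# A quick sanity check (a sketch, not part of the original example): the VTK
# export is a plain-text format, so we can peek at the header of the file we
# just wrote.
with open('field.vtk') as f:
    print(f.readline().strip())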
# %%
# First, define a regular 2-d mesh
discretization = [10, 5]
mesher = ot.IntervalMesher(discretization)
lowerBound = [0.0, 0.0]
upperBound = [2.0, 1.0]
interval = ot.Interval(lowerBound, upperBound)
mesh = mesher.build(interval)
mesh = ot.RegularGrid(0.0, 0.01, 100)
graph = mesh.draw()
view = viewer.View(graph)

# %%
# Allocate a process sample from a field
field = ot.Field()
sampleSize = 10
processSample = ot.ProcessSample(sampleSize, field)
# field.draw()

# %%
# Create a process sample as realizations of a process
amplitude = [1.0]
scale = [0.2] * 1
myCovModel = ot.ExponentialModel(scale, amplitude)
myProcess = ot.GaussianProcess(myCovModel, mesh)
processSample = myProcess.getSample(10)
# processSample

# %%
# Draw the sample, without interpolation
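# A hedged completion of the cell above: drawMarginal with interpolate=False
# plots the raw values at the mesh vertices instead of interpolated curves.
graph = processSample.drawMarginal(0, False)
view = viewer.View(graph)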