def meshHeart(ntheta, nr):
    # First, build the nodes
    nodes = ot.Sample(0, 2)
    nodes.add([0.0, 0.0])
    for j in range(ntheta):
        theta = (m.pi * j) / ntheta
        if abs(theta - 0.5 * m.pi) < 1e-10:
            rho = 2.0
        elif (abs(theta) < 1e-10) or (abs(theta - m.pi) < 1e-10):
            rho = 0.0
        else:
            absTanTheta = abs(m.tan(theta))
            rho = absTanTheta**(1.0 / absTanTheta) + m.sin(theta)
        cosTheta = m.cos(theta)
        sinTheta = m.sin(theta)
        for k in range(nr):
            tau = (k + 1.0) / nr
            r = rho * tau
            nodes.add([r * cosTheta, r * sinTheta - tau])
    # Second, build the triangles
    triangles = []
    # First ring: triangles sharing the origin node
    for j in range(ntheta):
        triangles.append([0, 1 + j * nr, 1 + ((j + 1) % ntheta) * nr])
    # Remaining rings: two triangles per quadrilateral cell
    for j in range(ntheta):
        for k in range(nr - 1):
            i0 = k + 1 + j * nr
            i1 = k + 2 + j * nr
            i2 = k + 2 + ((j + 1) % ntheta) * nr
            i3 = k + 1 + ((j + 1) % ntheta) * nr
            triangles.append([i0, i1, i2 % (nr * ntheta)])
            triangles.append([i0, i2, i3 % (nr * ntheta)])
    return ot.Mesh(nodes, triangles)
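
# A minimal usage sketch of meshHeart (added for illustration, not part of
# the original script): it assumes `import openturns as ot` and
# `import math as m` are in scope, as the function body does.
heart = meshHeart(48, 16)
graph = heart.draw()
graph.setTitle('Heart-shaped mesh')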
def liftAsProcessSample(self, coefficients):
    '''Lift a sample of coefficients into a collection of process samples
    and points.

    Parameters
    ----------
    coefficients : ot.Sample
        sample of values, generally following a centered normal law

    Returns
    -------
    processes : list
        ordered list of samples of scalars (ot.Sample) and field samples
        (ot.ProcessSample)
    '''
    assert isinstance(coefficients, (ot.Sample, ot.SampleImplementation))
    print('Lifting as process sample')
    jumpDim = 0
    processes = []
    for i in range(self.__field_distribution_count__):
        if self.__isProcess__[i]:
            if not self.__liftWithMean__:
                processes.append(self.__KL_lifting__[i](
                    coefficients[:, jumpDim:jumpDim + self.__mode_count__[i]]))
            else:
                processSample = self.__KL_lifting__[i](
                    coefficients[:, jumpDim:jumpDim + self.__mode_count__[i]])
                addConstant2Iterable(processSample, self.__means__[i])
                processes.append(processSample)
        else:
            if not self.__liftWithMean__:
                processSample = ot.ProcessSample(ot.Mesh(), 0, 1)
                val_sample = self.__KL_lifting__[i](
                    coefficients[:, jumpDim:jumpDim + self.__mode_count__[i]])
                for value in val_sample:
                    field = ot.Field(ot.Mesh(), 1)
                    field.setValueAtIndex(0, value)
                    processSample.add(field)
                processes.append(processSample)
            else:
                processSample = ot.ProcessSample(ot.Mesh(), 0, 1)
                val_sample = self.__KL_lifting__[i](
                    coefficients[:, jumpDim:jumpDim + self.__mode_count__[i]])
                mean = self.__means__[i]
                for value in val_sample:
                    field = ot.Field(ot.Mesh(), 1)
                    field.setValueAtIndex(0, [value[0] + mean])  # add the mean back
                    processSample.add(field)
                processes.append(processSample)
        jumpDim += self.__mode_count__[i]
    return processes
def simulate(self, value_input=None, reset=True, **kwargs):
    """Simulate the FMU.

    Parameters
    ----------
    value_input : Vector of input values.

    reset : Boolean, toggle resetting the FMU prior to simulation. True by
        default.

    time : Sequence of floats, time vector (optional).

    timestep : Float, time step in seconds (optional).

    Additional keyword arguments are passed on to the 'simulate' method of
    the underlying PyFMI model object.
    """
    kwargs.setdefault("initialization_script", self.initialization_script)
    kwargs_simulate = fmi.parse_kwargs_simulate(
        value_input,
        name_input=self.getFMUInputDescription(),
        name_output=self.getFMUOutputDescription(),
        model=self.model,
        **kwargs)
    if "final_time" in kwargs:
        raise Warning("final_time must be set in the constructor.")
    if "start_time" in kwargs:
        raise Warning("start_time must be set in the constructor.")
    simulation = fmi.simulate(
        self.model,
        reset=reset,
        start_time=self.start_time,
        final_time=self.final_time,
        **kwargs_simulate)
    time, values = fmi.strip_simulation(
        simulation, name_output=self.getOutputDescription(), final="trajectory")
    local_mesh = ot.Mesh([[t] for t in time],
                         [[i, i + 1] for i in range(len(time) - 1)])
    interpolation = ot.P1LagrangeInterpolation(
        local_mesh, self.getOutputMesh(), self.getOutputDimension())
    return interpolation(values)
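
# A hedged usage sketch (hypothetical names, not runnable without an actual
# FMU file): assuming `model` is an instance of the wrapper class owning
# this method, with start_time/final_time set in its constructor, a call
# could look like
#
#     trajectory = model.simulate(value_input=[2.0])
#
# and would return the output trajectory interpolated on the wrapper's
# output mesh.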
# Test realization
print('One realization= ')
print(myCompositeProcess.getRealization())

# future
print('future=', myCompositeProcess.getFuture(5))

#
# Create a spatial dynamical function
# Create the function g : R^2 --> R^2
#     (x1,x2) --> (x1^2, x1+x2)
g = ot.SymbolicFunction(['x1', 'x2'], ['x1^2', 'x1+x2'])

# Convert g : R^2 --> R^2 into a spatial function
myDynFunc = ot.ValueFunction(g, ot.Mesh(2))

# Then g acts on processes X: Omega x R^nSpat --> R^2

#
# Create a trend function fTrend: R^n --> R^q
# for example for myXtProcess of dimension 2
# defined on a bidimensional mesh
# fTrend : R^2 --> R^2
#     (t1, t2) --> (1+2t1, 1+3t2)
fTrend = ot.SymbolicFunction(['t1', 't2'], ['1+2*t1', '1+3*t2'])

#
# Create a Gaussian process of dimension 2
# whose mesh is a box of dimension 2
class AggregatedKarhunenLoeveResults(object):
    '''Class allowing us to aggregate scalar distributions and stochastic
    processes.

    Thanks to the Karhunen-Loève expansion we can consider a process with a
    given covariance model as a vector of random variables following a
    centered normal law.

    By stacking the scalar distributions and the vectors representative of
    the fields we obtain a unique vector representation of our aggregate.
    It is a link between a non-homogeneous ensemble of fields and scalars
    and a unique vector of scalars.
    '''
    def __init__(self, composedKLResultsAndDistributions):
        '''Initializes the aggregation

        Parameters
        ----------
        composedKLResultsAndDistributions : list
            ordered list of ot.Distribution and ot.KarhunenLoeveResult objects
        '''
        # KLRL : Karhunen Loeve Result List
        self.__KLResultsAndDistributions__ = atLeastList(composedKLResultsAndDistributions)
        assert len(self.__KLResultsAndDistributions__) > 0
        self.__field_distribution_count__ = len(self.__KLResultsAndDistributions__)
        self.__name__ = 'Unnamed'
        self.__KL_lifting__ = []
        self.__KL_projecting__ = []

        # Flags
        self.__isProcess__ = [False] * self.__field_distribution_count__
        self.__has_distributions__ = False
        self.__unified_dimension__ = False
        self.__unified_mesh__ = False
        self.__isAggregated__ = False
        self.__means__ = [.0] * self.__field_distribution_count__
        self.__liftWithMean__ = False

        # checking the nature of each element of the input list
        for i in range(self.__field_distribution_count__):
            # If the element is a Karhunen-Loeve decomposition
            if isinstance(self.__KLResultsAndDistributions__[i], ot.KarhunenLoeveResult):
                # initializing lifting and projecting objects.
                self.__KL_lifting__.append(
                    ot.KarhunenLoeveLifting(self.__KLResultsAndDistributions__[i]))
                self.__KL_projecting__.append(
                    ot.KarhunenLoeveProjection(self.__KLResultsAndDistributions__[i]))
                self.__isProcess__[i] = True
            # If the element is a distribution
            elif isinstance(self.__KLResultsAndDistributions__[i],
                            (ot.Distribution, ot.DistributionImplementation)):
                self.__has_distributions__ = True
                if self.__KLResultsAndDistributions__[i].getMean()[0] != 0:
                    print('The mean value of distribution {} at index {} of type {} is not 0.'.format(
                        '"' + self.__KLResultsAndDistributions__[i].getName() + '"',
                        str(i),
                        self.__KLResultsAndDistributions__[i].getClassName()))
                    name_distr = self.__KLResultsAndDistributions__[i].getName()
                    self.__means__[i] = self.__KLResultsAndDistributions__[i].getMean()[0]
                    self.__KLResultsAndDistributions__[i] -= self.__means__[i]
                    self.__KLResultsAndDistributions__[i].setName(name_distr)
                    print('Distribution recentered and mean added to the list of means')
                    print('Set the "liftWithMean" flag to True if you want to include the mean.')
                # The inverse iso-probabilistic transformation is analogous to lifting
                self.__KL_lifting__.append(
                    self.__KLResultsAndDistributions__[i].getInverseIsoProbabilisticTransformation())
                # The iso-probabilistic transformation is analogous to projecting
                self.__KL_projecting__.append(
                    self.__KLResultsAndDistributions__[i].getIsoProbabilisticTransformation())

        # If the aggregation contains distributions it cannot be homogeneous
        if not self.__has_distributions__:
            self.__unified_mesh__ = all_same(
                [self.__KLResultsAndDistributions__[i].getMesh()
                 for i in range(self.__field_distribution_count__)])
            self.__unified_dimension__ = (
                all_same([self.__KLResultsAndDistributions__[i].getCovarianceModel().getOutputDimension()
                          for i in range(self.__field_distribution_count__)])
                and all_same([self.__KLResultsAndDistributions__[i].getCovarianceModel().getInputDimension()
                              for i in range(self.__field_distribution_count__)]))

        # If only one object is passed it has to be a decomposed aggregated process
        if self.__field_distribution_count__ == 1:
            if hasattr(self.__KLResultsAndDistributions__[0], 'getCovarianceModel') \
                    and hasattr(self.__KLResultsAndDistributions__[0], 'getMesh'):
                # When aggregated, multivariate covariance functions are used
                self.__isAggregated__ = \
                    self.__KLResultsAndDistributions__[0].getCovarianceModel().getOutputDimension() \
                    > self.__KLResultsAndDistributions__[0].getMesh().getDimension()
                print('Process seems to be aggregated.')
            else:
                print('There is no point in passing only one process that is not aggregated')
                raise TypeError

        self.threshold = max([self.__KLResultsAndDistributions__[i].getThreshold()
                              if hasattr(self.__KLResultsAndDistributions__[i], 'getThreshold') else 1e-3
                              for i in range(self.__field_distribution_count__)])
        # Now gather the data we will usually need
        self.__process_distribution_description__ = \
            [self.__KLResultsAndDistributions__[i].getName()
             for i in range(self.__field_distribution_count__)]
        self._checkSubNames()
        self.__mode_count__ = [self.__KLResultsAndDistributions__[i].getEigenValues().getSize()
                               if hasattr(self.__KLResultsAndDistributions__[i], 'getEigenValues') else 1
                               for i in range(self.__field_distribution_count__)]
        self.__mode_description__ = self._getModeDescription()

    def __repr__(self):
        '''Visual representation of the object'''
        covarianceList = self.getCovarianceModel()
        eigValList = self.getEigenValues()
        meshList = self.getMesh()
        reprStr = '| '.join(
            ['class = AggregatedKarhunenLoeveResults',
             'name = {}'.format(self.getName()),
             'Aggregation Order = {}'.format(str(self.__field_distribution_count__)),
             'Threshold = {}'.format(str(self.threshold)),
             *['Covariance Model {} = '.format(str(i)) + covarianceList[i].__repr__()
               for i in range(self.__field_distribution_count__)],
             *['Eigen Value {} = '.format(str(i)) + eigValList[i].__repr__()
               for i in range(self.__field_distribution_count__)],
             *['Mesh {} = '.format(str(i)) + meshList[i].__repr__().partition('data=')[0]
               for i in range(self.__field_distribution_count__)]])
        return reprStr

    def _checkSubNames(self):
        '''Checks the names of the objects passed to see if they are all
        unique, or if default ones have to be assigned.'''
        if len(set(self.__process_distribution_description__)) != len(self.__process_distribution_description__):
            print('The process names are not unique.')
            print('Using generic names.')
            for i, process in enumerate(self.__KLResultsAndDistributions__):
                oldName = process.getName()
                newName = 'X_' + str(i)
                print('Old name was {}, new one is {}'.format(oldName, newName))
                process.setName(newName)
            self.__process_distribution_description__ = \
                [self.__KLResultsAndDistributions__[i].getName()
                 for i in range(self.__field_distribution_count__)]

    def _getModeDescription(self):
        '''Returns the description of each element of the input vector.
        (The vector obtained once the processes are expanded and stacked
        with the distributions.)
        '''
        modeDescription = list()
        for i, nMode in enumerate(self.__mode_count__):
            for j in range(nMode):
                modeDescription.append(
                    self.__process_distribution_description__[i] + '_' + str(j))
        return modeDescription

    def _checkCoefficients(self, coefficients):
        '''Check if the vector passed has the right number of elements.'''
        nModes = sum(self.__mode_count__)
        if isinstance(coefficients, ot.Point) and len(coefficients) == nModes:
            return True
        elif isinstance(coefficients, (ot.Sample, ot.SampleImplementation)) \
                and len(coefficients[0]) == nModes:
            return True
        else:
            print('The vector passed does not have the right number of elements.')
            print('n° elems: {} != {}'.format(str(len(coefficients)), str(nModes)))
            return False

    # new method
    def getMean(self, i=None):
        '''Get the mean value of the stochastic processes and the scalar
        distributions

        Parameters
        ----------
        i : int
            index of the distribution or process
        '''
        if i is not None:
            return self.__means__[i]
        else:
            return self.__means__

    # new method
    def setMean(self, i, val):
        '''Sets the mean of the variable at index i to a value

        Parameters
        ----------
        i : int
            index of the distribution or process
        val : float, int
            value to which we set the mean
        '''
        self.__means__[i] = val

    # new method
    def setLiftWithMean(self, theBool):
        '''Flag to say if we add the mean to the generated values of fields
        or scalars. If not, all the events are centered.

        Parameters
        ----------
        theBool : bool
            whether to lift the distributions and processes to their
            non-homogeneous original space with their mean value, or
            centered
        '''
        self.__liftWithMean__ = theBool

    def getClassName(self):
        '''Returns a list of the class each process/distribution belongs to.'''
        classNames = [self.__KLResultsAndDistributions__[i].__class__.__name__
                      for i in range(self.__field_distribution_count__)]
        return list(set(classNames))

    def getCovarianceModel(self):
        '''Returns a list of covariance models for each process.'''
        return [self.__KLResultsAndDistributions__[i].getCovarianceModel()
                if hasattr(self.__KLResultsAndDistributions__[i], 'getCovarianceModel') else None
                for i in range(self.__field_distribution_count__)]

    def getEigenValues(self):
        '''Returns a list of the eigenvalues for each process.'''
        return [self.__KLResultsAndDistributions__[i].getEigenValues()
                if hasattr(self.__KLResultsAndDistributions__[i], 'getEigenValues') else None
                for i in range(self.__field_distribution_count__)]

    def getId(self):
        '''Returns a list containing the ID of each process/distribution.'''
        return [self.__KLResultsAndDistributions__[i].getId()
                for i in range(self.__field_distribution_count__)]

    def getImplementation(self):
        '''Returns a list containing the implementation of each
        process/distribution, else None.'''
        return [self.__KLResultsAndDistributions__[i].getImplementation()
                if hasattr(self.__KLResultsAndDistributions__[i], 'getImplementation') else None
                for i in range(self.__field_distribution_count__)]

    def getMesh(self):
        '''Returns a list containing the mesh of each process, or None if
        it is a distribution.'''
        return [self.__KLResultsAndDistributions__[i].getMesh()
                if hasattr(self.__KLResultsAndDistributions__[i], 'getMesh') else None
                for i in range(self.__field_distribution_count__)]

    def getModes(self):
        '''Returns a list containing the modes of each process, None if it
        is a distribution.'''
        return [self.__KLResultsAndDistributions__[i].getModes()
                if hasattr(self.__KLResultsAndDistributions__[i], 'getModes') else None
                for i in range(self.__field_distribution_count__)]

    def getModesAsProcessSample(self):
        '''Returns a list containing the modes as a process sample for each
        process in the aggregation.'''
        return [self.__KLResultsAndDistributions__[i].getModesAsProcessSample()
                if hasattr(self.__KLResultsAndDistributions__[i], 'getModesAsProcessSample') else None
                for i in range(self.__field_distribution_count__)]

    def getName(self):
        '''Returns the name of the aggregation object.'''
        return self.__name__

    def getProjectionMatrix(self):
        '''Returns the projection matrix for each Karhunen-Loeve
        decomposition, None if it is a distribution.'''
        return [self.__KLResultsAndDistributions__[i].getProjectionMatrix()
                if hasattr(self.__KLResultsAndDistributions__[i], 'getProjectionMatrix') else None
                for i in range(self.__field_distribution_count__)]

    def getScaledModes(self):
        '''Returns the scaled modes for each Karhunen-Loeve decomposition,
        None if it is a distribution.'''
        return [self.__KLResultsAndDistributions__[i].getScaledModes()
                if hasattr(self.__KLResultsAndDistributions__[i], 'getScaledModes') else None
                for i in range(self.__field_distribution_count__)]

    def getScaledModesAsProcessSample(self):
        '''Returns the scaled modes as a process sample for each
        Karhunen-Loeve decomposition, None if it is a distribution.'''
        return [self.__KLResultsAndDistributions__[i].getScaledModesAsProcessSample()
                if hasattr(self.__KLResultsAndDistributions__[i], 'getScaledModesAsProcessSample') else None
                for i in range(self.__field_distribution_count__)]

    def getThreshold(self):
        '''Gets the global threshold for the Karhunen-Loeve expansions
        approximation.'''
        return self.threshold

    def setName(self, name):
        '''Sets the name of the aggregation object.'''
        self.__name__ = name

    def liftAsProcessSample(self, coefficients):
        '''Lift a sample of coefficients into a collection of process
        samples and points.

        Parameters
        ----------
        coefficients : ot.Sample
            sample of values, generally following a centered normal law

        Returns
        -------
        processes : list
            ordered list of samples of scalars (ot.Sample) and field
            samples (ot.ProcessSample)
        '''
        assert isinstance(coefficients, (ot.Sample, ot.SampleImplementation))
        print('Lifting as process sample')
        jumpDim = 0
        processes = []
        for i in range(self.__field_distribution_count__):
            if self.__isProcess__[i]:
                if not self.__liftWithMean__:
                    processes.append(self.__KL_lifting__[i](
                        coefficients[:, jumpDim:jumpDim + self.__mode_count__[i]]))
                else:
                    processSample = self.__KL_lifting__[i](
                        coefficients[:, jumpDim:jumpDim + self.__mode_count__[i]])
                    addConstant2Iterable(processSample, self.__means__[i])
                    processes.append(processSample)
            else:
                if not self.__liftWithMean__:
                    processSample = ot.ProcessSample(ot.Mesh(), 0, 1)
                    val_sample = self.__KL_lifting__[i](
                        coefficients[:, jumpDim:jumpDim + self.__mode_count__[i]])
                    for value in val_sample:
                        field = ot.Field(ot.Mesh(), 1)
                        field.setValueAtIndex(0, value)
                        processSample.add(field)
                    processes.append(processSample)
                else:
                    processSample = ot.ProcessSample(ot.Mesh(), 0, 1)
                    val_sample = self.__KL_lifting__[i](
                        coefficients[:, jumpDim:jumpDim + self.__mode_count__[i]])
                    mean = self.__means__[i]
                    for value in val_sample:
                        field = ot.Field(ot.Mesh(), 1)
                        field.setValueAtIndex(0, [value[0] + mean])  # add the mean back
                        processSample.add(field)
                    processes.append(processSample)
            jumpDim += self.__mode_count__[i]
        return processes

    def liftAsField(self, coefficients):
        '''Lift a vector of coefficients into a list of fields and points.

        Parameters
        ----------
        coefficients : ot.Point
            one vector of values, generally following a centered normal law

        Returns
        -------
        to_return : list
            ordered list of scalars (ot.Point) and fields (ot.Field)
        '''
        assert isinstance(coefficients, ot.Point), 'function only lifts points'
        valid = self._checkCoefficients(coefficients)
        print('Lifting as field')
        if valid:
            to_return = []
            jumpDim = 0
            for i in range(self.__field_distribution_count__):
                if self.__isProcess__[i]:
                    field = self.__KLResultsAndDistributions__[i].liftAsField(
                        coefficients[jumpDim:jumpDim + self.__mode_count__[i]])
                    jumpDim += self.__mode_count__[i]
                    if not self.__liftWithMean__:
                        to_return.append(field)
                    else:
                        vals = field.getValues()
                        vals += self.__means__[i]
                        field.setValues(vals)
                        to_return.append(field)
                else:
                    value = self.__KL_lifting__[i](
                        coefficients[jumpDim:jumpDim + self.__mode_count__[i]])
                    jumpDim += self.__mode_count__[i]
                    if not self.__liftWithMean__:
                        field = ot.Field(ot.Mesh(), 1)
                        field.setValueAtIndex(0, value)
                        to_return.append(field)
                    else:
                        field = ot.Field(ot.Mesh(), 1)
                        value[0] += self.__means__[i]
                        field.setValueAtIndex(0, value)
                        to_return.append(field)
            return to_return
        else:
            raise Exception('DimensionError: the vector of coefficients has the wrong shape')
algo_kl_process_1D = ot.KarhunenLoeveP1Algorithm(
    mesh_1D, process_1D.getCovarianceModel())
algo_kl_process_1D.run()
kl_results_1D = algo_kl_process_1D.getResult()

algo_kl_process_2D = ot.KarhunenLoeveP1Algorithm(
    mesh_2D, process_2D.getCovarianceModel())
algo_kl_process_2D.run()
kl_results_2D = algo_kl_process_2D.getResult()

### Now let's compose our Karhunen-Loeve results and our distributions.
composedKLResultsAndDistributions = aklr.AggregatedKarhunenLoeveResults(
    [kl_results_2D, kl_results_1D, scalar_distribution])

### Now let's see if we manage to project and lift the realizations we had before.
realizationFields = [field_2D, field_1D, ot.Field(ot.Mesh(), [scalar_0[0]])]
projectedCoeffs = composedKLResultsAndDistributions.project(realizationFields)
print('Projected coefficients are :', projectedCoeffs)
liftedFieldsO = composedKLResultsAndDistributions.liftAsField(projectedCoeffs)
print('Lifted fields are :', liftedFieldsO)

### Now let's use our function wrapper and see if we get the same results!
dummyWrapper = klgfw.KarhunenLoeveGeneralizedFunctionWrapper(
    composedKLResultsAndDistributions, dummyFunction2Wrap, None, 1)
print('testing call:')
dummyWrapper(projectedCoeffs)


class TestComposeAndWrap(unittest.TestCase):
    def testLiftAndProject(self,
vertices = []
vertices.append([0.0, 0.0, 0.0])
vertices.append([0.0, 0.0, 1.0])
vertices.append([0.0, 1.0, 0.0])
vertices.append([0.0, 1.0, 1.0])
vertices.append([1.0, 0.0, 0.0])
vertices.append([1.0, 0.0, 1.0])
vertices.append([1.0, 1.0, 0.0])
vertices.append([1.0, 1.0, 1.0])
simplices = []
simplices.append([0, 1, 2, 4])
simplices.append([3, 5, 6, 7])
simplices.append([1, 2, 3, 6])
simplices.append([1, 2, 4, 6])
simplices.append([1, 3, 5, 6])
simplices.append([1, 4, 5, 6])
mesh3D = ot.Mesh(vertices, simplices)


def myPyFunc(X):
    Xs = ot.Sample(X)
    Y = Xs * ([2.0] * Xs.getDimension())
    Y.setDescription(ot.Description.BuildDefault(Y.getDimension(), "Y"))
    return Y


in_dim = 3
out_dim = 3
myFunc = ot.PythonFieldFunction(mesh3D, in_dim, mesh3D, out_dim, myPyFunc)
print('myFunc=', myFunc)
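
# A minimal usage sketch (added for illustration, assuming a FieldFunction
# can be called on an ot.Field defined on its input mesh): myFunc should
# simply double every value of a random input field.
inputField = ot.Field(mesh3D, ot.Normal(in_dim).getSample(mesh3D.getVerticesNumber()))
outputField = myFunc(inputField)
print('outputField=', outputField)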
# and AbsoluteExponential models
covarianceModel = ot.SquaredExponential([7.63, 2.11], [7.38])

# 3) Basis definition
basis = ot.ConstantBasisFactory(inputDimension).build()
# Kriging algorithm
algo = ot.KrigingAlgorithm(inputSample, outputSample, covarianceModel, basis)
algo.setOptimizeParameters(False)  # do not optimize hyper-parameters
algo.run()
result = algo.getResult()

vertices = [[1.0, 0.0], [2.0, 0.0], [2.0, 1.0], [1.0, 1.0], [1.5, 0.5]]
simplices = [[0, 1, 4], [1, 2, 4], [2, 3, 4], [3, 0, 4]]
mesh2D = ot.Mesh(vertices, simplices)
process = ot.ConditionedGaussianProcess(result, mesh2D)

# Get a realization of the process
realization = process.getRealization()
print("realization = ", repr(realization))

# Get a sample & compare it to the expectation
sample = process.getSample(5000)
mean = sample.computeMean()
print("Mean over 5000 realizations = ", repr(mean))

# Check that one can sample the process over a mesh containing the
# conditioning points and 100 new points
vertices = ot.Sample(inputSample)
vertices.add(
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot
import os

ot.TESTPREAMBLE()

nrVertices = 100
vertices = ot.Normal().getSample(nrVertices).sort()
simplices = [[i, i + 1] for i in range(nrVertices - 1)]
mesh1 = ot.Mesh(vertices, simplices)
vertices *= -1.0
mesh2 = ot.Mesh(vertices, simplices)

for mesh in [mesh1, mesh2]:
    lowerBound = mesh.getLowerBound()[0]
    upperBound = mesh.getUpperBound()[0]
    n = mesh.getSimplicesNumber()
    print("mesh=", mesh, "lowerBound=", lowerBound,
          "upperBound=", upperBound, n, "simplices")
    algo = ot.EnclosingSimplexMonotonic1D(mesh.getVertices())
    ot.RandomGenerator.SetSeed(0)
    test = ot.Sample(ot.Uniform(-3.0, 3.0).getSample(1000))
    vertices = mesh.getVertices()
    for vertex in test:
        index = algo.query(vertex)
        x = vertex[0]
        if x < lowerBound or x > upperBound:
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

mesh = ot.RegularGrid(0.0, 1.0, 4)
values = [(x, 2.0 * x, x * x) for x in mesh.getValues()]
outPoint = [2.5]
interpolation = ot.P1LagrangeInterpolation(
    mesh, ot.Mesh([outPoint]), len(values[0]))
print("Interpolation=", interpolation)
print("Values at", outPoint, "=", interpolation(values))
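
# For reference, a manual check (added here, not part of the original test)
# of what the P1 piecewise-linear interpolation does: the point 2.5 lies
# between the grid vertices t=2 and t=3, so each output component is
# interpolated linearly between the values at those two vertices.
t0, t1 = 2.0, 3.0
w = (outPoint[0] - t0) / (t1 - t0)  # barycentric weight, 0.5 here
manual = [(1.0 - w) * v0 + w * v1 for v0, v1 in zip(values[2], values[3])]
print("Manual P1 interpolation at", outPoint, "=", manual)  # [2.5, 5.0, 6.5]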
print("mesh1D=", mesh1D) # Manual bounding box mesh1D = mesher1D.build(levelSet1D, ot.Interval(-10.0, 10.0)) print("mesh1D=", mesh1D) # The 2D mesher mesher2D = ot.LevelSetMesher([5] * 2) print("mesher2D=", mesher2D) function2D = ot.SymbolicFunction( ["x0", "x1"], ["cos(x0 * x1)/(1 + 0.1 * (x0^2 + x1^2))"]) levelSet2D = ot.LevelSet(function2D, level) # Automatic bounding box mesh2D = ot.Mesh(mesher2D.build(levelSet2D)) print("mesh2D=", mesh2D) # Manual bounding box mesh2D = mesher2D.build(levelSet2D, ot.Interval([-10.0] * 2, [10.0] * 2)) print("mesh2D=", mesh2D) # The 3D mesher mesher3D = ot.LevelSetMesher([3] * 3) print("mesher3D=", mesher3D) function3D = ot.SymbolicFunction( ["x0", "x1", "x2"], ["cos(x0 * x1 + x2)/(1 + 0.1*(x0^2 + x1^2 + x2^2))"]) levelSet3D = ot.LevelSet(function3D, level)
import openturns as ot

mesh = ot.Mesh(5)
for v in mesh.getVertices():
    print(v)
print('OK')
mesh.exportToVTKFile('/tmp/mesh.vtk')
# check that hmat library was found
print('7: HMatrix (hmat-oss)'.ljust(width), end=' ')
try:
    # This is a little bit tricky because HMat 1.0 fails with 1x1 matrices
    ot.ResourceMap.SetAsUnsignedInteger(
        'TemporalNormalProcess-SamplingMethod', 1)
    vertices = [[0.0, 0.0, 0.0]]
    vertices.append([1.0, 0.0, 0.0])
    vertices.append([0.0, 1.0, 0.0])
    vertices.append([0.0, 0.0, 1.0])
    simplices = [[0, 1, 2, 3]]
    # Discard messages from HMat
    ot.Log.Show(0)
    process = ot.TemporalNormalProcess(
        ot.ExponentialModel(3), ot.Mesh(vertices, simplices))
    f = process.getRealization()
    print('OK')
except:
    print('no')

# check that nlopt library was found
print('8: optimization (NLopt)'.ljust(width), end=' ')
try:
    problem = ot.OptimizationProblem()
    algo = ot.SLSQP()
    algo.setProblem(problem)
    print('OK')
except:
    print('no')
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot
import math as m

ot.PlatformInfo.SetNumericalPrecision(6)

# 1D example
mesh1D = ot.Mesh()
print("Default 1D mesh=", mesh1D)
vertices = ot.Sample(0, 1)
vertices.add([0.5])
vertices.add([1.5])
vertices.add([2.1])
vertices.add([2.7])
simplices = [[]] * 3
simplices[0] = [0, 1]
simplices[1] = [1, 2]
simplices[2] = [2, 3]
mesh1D = ot.Mesh(vertices, simplices)
mesh1Ddomain = ot.MeshDomain(mesh1D)
tree = ot.KDTree(vertices)
enclosingSimplex = ot.EnclosingSimplexAlgorithm(vertices, simplices)
print("1D mesh=", mesh1D)
print("Is empty? ", mesh1D.isEmpty())
print("vertices=", mesh1D.getVertices())
print("simplices=", mesh1D.getSimplices())
print("volume=", "%.3f" % mesh1D.getVolume())
print("simplices volume=", mesh1D.computeSimplicesVolume())
p = [1.3]
print("is p=", p, " in mesh? ", mesh1Ddomain.contains(p))
for i, vertex in enumerate(test):
    index = bvh.query(vertex)
    if index >= nrSimplices:
        print(i, "is outside")
    else:
        found, coordinates = mesh.checkPointInSimplexWithCoordinates(
            vertex, index)
        if not found:
            print("Wrong simplex found for", vertex, "(index=", index,
                  simplices[index], "barycentric coordinates=", coordinates)

indices = bvh.query(test)
for i, index in enumerate(indices):
    if index >= nrSimplices:
        print(i, "is outside")
    else:
        found, coordinates = mesh.checkPointInSimplexWithCoordinates(
            test[i], index)
        if not found:
            print("Wrong simplex found for", test[i], "(index=", index,
                  simplices[index], "barycentric coordinates=", coordinates)

# segfault with 1 simplex
mesh = ot.Mesh([[0.0, 0.0], [1.0, 0.0], [0.5, 1.0]], [[0, 1, 2]])
bvh = ot.BoundingVolumeHierarchy(mesh.getVertices(), mesh.getSimplices())
print(bvh.query([0.125, 0.2]))
print(bvh.query([0.125, 0.3]))
sample.add(point2)
sample.add(point3)
sample.add(point4)
sample.add(point2)
sample.add(point4)
sample.add(point3)
print(sample)
study.add('sample', sample)

mesh = ot.IntervalMesher([50] * 3).build(ot.Interval(3))
study.add('mesh', mesh)

study.save()

study2 = ot.Study()
study2.setStorageManager(ot.XMLH5StorageManager(fileName))
study2.load()

sample2 = ot.Sample()
study2.fillObject('sample', sample2)
print(sample2)
assert sample == sample2, "wrong sample"

mesh2 = ot.Mesh()
study2.fillObject('mesh', mesh2)
assert mesh == mesh2, "wrong mesh"

# cleanup
os.remove(fileName)
os.remove(fileName.replace(".xml.gz", ".h5"))
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View

# Define the vertices of the mesh
vertices = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0],
            [1.5, 1.0], [2.0, 1.5], [0.5, 1.5]]
# Define the simplices of the mesh
simplices = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [2, 4, 5], [0, 2, 5]]
# Create the mesh
mesh2D = ot.Mesh(vertices, simplices)

# Create a graph
graph = ot.Graph('Mesh 2D', '', '', True, 'bottomright')
graph.add(mesh2D.draw())

# Then, draw it
fig = plt.figure(figsize=(4, 4))
axis = fig.add_subplot(111)
View(graph, figure=fig, axes=[axis], add_legend=True)
axis.set_xlim(auto=True)
# Set numerical precision to 4
ot.PlatformInfo.SetNumericalPrecision(4)
sampleSize = 40
spatialDimension = 1

# Create the function to estimate
model = ot.SymbolicFunction(["x0"], ["x0"])

X = ot.Sample(sampleSize, spatialDimension)
for i in range(sampleSize):
    X[i, 0] = 3.0 + (8.0 * i) / sampleSize
Y = model(X)

# Add a small noise to the data
Y += ot.GaussianProcess(ot.AbsoluteExponential(
    [0.1], [0.2]), ot.Mesh(X)).getRealization().getValues()

basis = ot.LinearBasisFactory(spatialDimension).build()

# Case of a misspecified covariance model
covarianceModel = ot.DiracCovarianceModel(spatialDimension)
print("===================================================\n")
algo = ot.GeneralLinearModelAlgorithm(X, Y, covarianceModel, basis)
algo.run()

result = algo.getResult()
print("\ncovariance (dirac, optimized)=", result.getCovarianceModel())
print("trend (dirac, optimized)=", result.getTrendCoefficients())
print("===================================================\n")

# Now without estimating covariance parameters
basis = ot.LinearBasisFactory(spatialDimension).build()
covarianceModel = ot.DiracCovarianceModel(spatialDimension)
field = ot.Field(mesh, values)
evaluation = ot.P1LagrangeEvaluation(field)
x = [2.3]
y = evaluation(x)
print(y)
ott.assert_almost_equal(y, [0.55])

# Learning sample on meshD
mesher = ot.IntervalMesher([7, 7])
lowerBound = [-1.0, -1.0]
upperBound = [1.0, 1.0]
interval = ot.Interval(lowerBound, upperBound)
meshD = mesher.build(interval)
sample = ot.ProcessSample(meshD, 10, 1)
field = ot.Field(meshD, 1)
for k in range(sample.getSize()):
    # meshD has 8 x 8 = 64 vertices
    field.setValues(ot.Normal().getSample(64))
    sample[k] = field
lagrange = ot.P1LagrangeEvaluation(sample)

# New mesh
mesh = ot.Mesh(ot.MonteCarloExperiment(
    ot.ComposedDistribution([ot.Uniform(-1.0, 1.0)] * 2), 200).generate())
point = mesh.getVertices()[0]
y = lagrange(point)
print(y)
index = lagrange.getEnclosingSimplexAlgorithm().query(point)
print(index)
assert index == 12, "wrong index"
# ------------------

# %%
# In this paragraph we create a mesh :math:`\mathcal{M}` associated to a domain :math:`\mathcal{D} \in \mathbb{R}^n`.
#
# A mesh is defined from vertices in :math:`\mathbb{R}^n` and a topology that connects the vertices: the simplices. The simplex :math:`Indices([i_1, \dots, i_{n+1}])` connects the vertices of indices :math:`(i_1, \dots, i_{n+1})`. In dimension 1, a simplex is an interval :math:`Indices([i_1, i_2])`; in dimension 2, it is a triangle :math:`Indices([i_1, i_2, i_3])`.
#
# The library makes it easy to create a mesh which is a box of dimension :math:`d=1` or :math:`d=2`, regularly meshed in all its directions, thanks to the `IntervalMesher` object (an example is given at the end of this section).
#
# Consider :math:`X: \Omega \times \mathcal{D} \rightarrow \mathbb{R}^d` a multivariate stochastic process of dimension :math:`d`, where :math:`\mathcal{D} \in \mathbb{R}^n`. The mesh :math:`\mathcal{M}` is a discretization of the domain :math:`\mathcal{D}`.

# %%
# A one dimensional mesh is created and represented by:
vertices = [[0.5], [1.5], [2.1], [2.7]]
simplices = [[0, 1], [1, 2], [2, 3]]
mesh1D = ot.Mesh(vertices, simplices)
graph1 = mesh1D.draw()
graph1.setTitle('One dimensional mesh')
view = viewer.View(graph1)

# %%
# We define a bidimensional mesh:
vertices = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0],
            [1.5, 1.0], [2.0, 1.5], [0.5, 1.5]]
simplices = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [2, 4, 5], [0, 2, 5]]
mesh2D = ot.Mesh(vertices, simplices)
graph2 = mesh2D.draw()
graph2.setTitle('Bidimensional mesh')
graph2.setLegendPosition('bottomright')
view = viewer.View(graph2)
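
# %%
# As mentioned above, a regular mesh of a box can be built with the
# `IntervalMesher` object. A minimal sketch (the interval and the number of
# intervals per direction are arbitrary choices for illustration): mesh the
# rectangle :math:`[0, 2] \times [0, 1]` with 10 intervals per direction.
interval = ot.Interval([0.0, 0.0], [2.0, 1.0])
mesher = ot.IntervalMesher([10, 10])
meshBox = mesher.build(interval)
graph3 = meshBox.draw()
graph3.setTitle('Regular mesh of a box')
view = viewer.View(graph3)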
print("Checking %s" % (x_train_value)) indices = np.argwhere(x_test == x_train_value) if len(indices) == 1: print(" Delete %s" % (x_train_value)) x_test_filtered = np.delete(x_test_filtered, indices[0, 0]) else: print(" OK") return x_test_filtered # %% vertices_filtered = deleteCommonValues(np.array(x_train.asPoint()), np.array(vertices.asPoint())) # %% evaluationMesh = ot.Mesh(ot.Sample([[vf] for vf in vertices_filtered])) # %% process = ot.ConditionedGaussianProcess(krigingResult, evaluationMesh) # %% trajectories = process.getSample(10) type(trajectories) # %% # The `getSample` method returns a `ProcessSample`. By comparison, the `getSample` method of a `KrigingRandomVector` would return a `Sample`. # %% graph = trajectories.drawMarginal() graph.add(plot_data_test(x_test, y_test)) graph.add(plot_data_train(x_train, y_train))
# Set numerical precision to 4
ot.PlatformInfo.SetNumericalPrecision(4)
sampleSize = 40
inputDimension = 1

# Create the function to estimate
model = ot.SymbolicFunction(["x0"], ["x0"])

X = ot.Sample(sampleSize, inputDimension)
for i in range(sampleSize):
    X[i, 0] = 3.0 + (8.0 * i) / sampleSize
Y = model(X)

# Add a small noise to the data
Y += ot.GaussianProcess(ot.AbsoluteExponential(
    [0.1], [0.2]), ot.Mesh(X)).getRealization().getValues()

basis = ot.LinearBasisFactory(inputDimension).build()

# Case of a misspecified covariance model
covarianceModel = ot.DiracCovarianceModel(inputDimension)
print("===================================================\n")
algo = ot.GeneralLinearModelAlgorithm(X, Y, covarianceModel, basis)
algo.run()

result = algo.getResult()
print("\ncovariance (dirac, optimized)=", result.getCovarianceModel())
print("trend (dirac, optimized)=", result.getTrendCoefficients())
print("===================================================\n")

# Now without estimating covariance parameters
basis = ot.LinearBasisFactory(inputDimension).build()
    def __init__(self):
        self.__mesh = ot.Mesh(42)