# Tighten the error threshold used by the LARS/least-squares basis selection
# so the sparse chaos expansion keeps more accurate terms.
ot.ResourceMap.SetAsScalar("LeastSquaresMetaModelSelection-ErrorThreshold", 1.0e-7)
# Fit a functional chaos expansion linking the input KL coefficients to the
# output KL coefficients.
algo_chaos = ot.FunctionalChaosAlgorithm(sample_xi_X, sample_xi_Y, basis.getMeasure(), adaptive, projection)
algo_chaos.run()
result_chaos = algo_chaos.getResult()
meta_model = result_chaos.getMetaModel()
print("myConvolution=", myConvolution.getInputDimension(), "->", myConvolution.getOutputDimension())
# Field -> point: project an input field onto its KL coefficients.
preprocessing = ot.KarhunenLoeveProjection(result_X)
print("preprocessing=", preprocessing.getInputDimension(), "->", preprocessing.getOutputDimension())
print("meta_model=", meta_model.getInputDimension(), "->", meta_model.getOutputDimension())
# Point -> field: rebuild an output trajectory from predicted KL coefficients.
postprocessing = ot.KarhunenLoeveLifting(result_Y)
print("postprocessing=", postprocessing.getInputDimension(), "->", postprocessing.getOutputDimension())
# Full field-to-field surrogate: project, run the chaos metamodel, then lift.
meta_model_field = ot.FieldToFieldConnection(
    postprocessing, ot.FieldToPointConnection(meta_model, preprocessing))

# %%
# Meta_model validation
iMax = 10
sample_X_validation = process_X.getSample(iMax)
sample_Y_validation = myConvolution(sample_X_validation)
graph_sample_Y_validation = sample_Y_validation.drawMarginal(0)
sample_Y_hat = meta_model_field(sample_X_validation)
graph = sample_Y_hat.drawMarginal(0)
# NOTE(review): the body of this loop is truncated in this view of the file.
for i in range(iMax):
def globalMetamodel(sample):
    """Evaluate the field metamodel on a point sample.

    Maps *sample* to Karhunen-Loeve coefficients through the point
    metamodel, then lifts those coefficients back into a process sample
    (trajectories) using the stored KL decomposition ``resultKL``.
    """
    coefficients = metamodel(sample)
    lifting = ot.KarhunenLoeveLifting(resultKL)
    return lifting(coefficients)
def __init__(self, composedKLResultsAndDistributions):
    '''Initialize the aggregation of KL decompositions and distributions.

    Parameters
    ----------
    composedKLResultsAndDistributions : list
        Ordered list of ot.Distribution and ot.KarhunenLoeveResult objects.
        A single object or a list is accepted (it is normalized below).
    '''
    self.__KLResultsAndDistributions__ = atLeastList(composedKLResultsAndDistributions) #KLRL : Karhunen Loeve Result List
    assert len(self.__KLResultsAndDistributions__)>0
    self.__field_distribution_count__ = len(self.__KLResultsAndDistributions__)
    self.__name__ = 'Unnamed'
    # Per-element lifting (coefficients -> realization) and projecting
    # (realization -> coefficients) callables, filled in the loop below.
    self.__KL_lifting__ = []
    self.__KL_projecting__ = []
    #Flags
    self.__isProcess__ = [False]*self.__field_distribution_count__
    self.__has_distributions__ = False
    self.__unified_dimension__ = False
    self.__unified_mesh__ = False
    self.__isAggregated__ = False
    self.__means__ = [.0]*self.__field_distribution_count__
    self.__liftWithMean__ = False
    # Check the nature of each element of the input list.
    for i in range(self.__field_distribution_count__):
        # If the element is a Karhunen-Loeve decomposition
        if isinstance(self.__KLResultsAndDistributions__[i], ot.KarhunenLoeveResult):
            # Initialize the lifting and projecting objects.
            self.__KL_lifting__.append(ot.KarhunenLoeveLifting(self.__KLResultsAndDistributions__[i]))
            self.__KL_projecting__.append(ot.KarhunenLoeveProjection(self.__KLResultsAndDistributions__[i]))
            self.__isProcess__[i] = True
        # If the element is a distribution
        elif isinstance(self.__KLResultsAndDistributions__[i], (ot.Distribution, ot.DistributionImplementation)):
            self.__has_distributions__ = True
            # Non-centered distributions are recentered in place; their mean is
            # kept in __means__ so it can be re-added when lifting with mean.
            if self.__KLResultsAndDistributions__[i].getMean()[0] != 0 :
                print('The mean value of distribution {} at index {} of type {} is not 0.'.format(str('"'+self.__KLResultsAndDistributions__[i].getName()+'"'), str(i), self.__KLResultsAndDistributions__[i].getClassName()))
                name_distr = self.__KLResultsAndDistributions__[i].getName()
                self.__means__[i] = self.__KLResultsAndDistributions__[i].getMean()[0]
                self.__KLResultsAndDistributions__[i] -= self.__means__[i]
                self.__KLResultsAndDistributions__[i].setName(name_distr)
                print('Distribution recentered and mean added to list of means')
                print('Set the "liftWithMean" flag to true if you want to include the mean.')
            # The inverse iso-probabilistic transformation plays the role of lifting...
            self.__KL_lifting__.append(self.__KLResultsAndDistributions__[i].getInverseIsoProbabilisticTransformation())
            # ...and the iso-probabilistic transformation the role of projecting.
            self.__KL_projecting__.append(self.__KLResultsAndDistributions__[i].getIsoProbabilisticTransformation())
    # If the aggregate contains distributions it cannot be homogeneous
    # (distributions have no mesh), so only check uniformity otherwise.
    if not self.__has_distributions__ :
        self.__unified_mesh__ = all_same([self.__KLResultsAndDistributions__[i].getMesh() for i in range(self.__field_distribution_count__)])
        self.__unified_dimension__ = (    all_same([self.__KLResultsAndDistributions__[i].getCovarianceModel().getOutputDimension() for i in range(self.__field_distribution_count__)])\
                                      and all_same([self.__KLResultsAndDistributions__[i].getCovarianceModel().getInputDimension() for i in range(self.__field_distribution_count__)]))
    # If only one object is passed it has to be a decomposed aggregated process.
    if self.__field_distribution_count__ == 1 :
        if hasattr(self.__KLResultsAndDistributions__[0], 'getCovarianceModel') and hasattr(self.__KLResultsAndDistributions__[0], 'getMesh'):
            # When aggregated, multivariate covariance models are used, so the
            # covariance output dimension exceeds the mesh dimension.
            self.__isAggregated__ = self.__KLResultsAndDistributions__[0].getCovarianceModel().getOutputDimension() > self.__KLResultsAndDistributions__[0].getMesh().getDimension()
            print('Process seems to be aggregated. ')
        else :
            print('There is no point in passing only one process that is not aggregated')
            raise TypeError
    # Global threshold: the loosest (largest) threshold of all KL results;
    # plain distributions contribute the fallback 1e-3.
    self.threshold = max([self.__KLResultsAndDistributions__[i].getThreshold() if hasattr(self.__KLResultsAndDistributions__[i], 'getThreshold') else 1e-3 for i in range(self.__field_distribution_count__)])
    # Gather the data we will usually need.
    self.__process_distribution_description__ = [self.__KLResultsAndDistributions__[i].getName() for i in range(self.__field_distribution_count__)]
    self._checkSubNames()
    # Distributions count as a single mode; KL results expose their eigenvalues.
    self.__mode_count__ = [self.__KLResultsAndDistributions__[i].getEigenValues().getSize() if hasattr(self.__KLResultsAndDistributions__[i], 'getEigenValues') else 1 for i in range(self.__field_distribution_count__)]
    self.__mode_description__ = self._getModeDescription()
# Run the Karhunen-Loeve algorithm and fetch its result.
algo.run()
KLResult = algo.getResult()
scaledModes = KLResult.getScaledModesAsProcessSample()

# %%
graph = scaledModes.drawMarginal(0)
graph.setTitle('KL modes')
graph.setXTitle(r'$t$')
graph.setYTitle(r'$z$')
view = viewer.View(graph)

# %%
# We create the `postProcessingKL` function which takes coefficients of the K.-L. modes as inputs and returns the trajectories.

# %%
karhunenLoeveLiftingFunction = ot.KarhunenLoeveLifting(KLResult)

# %%
# The `project` method computes the projection of the output sample (i.e. the trajectories) onto the K.-L. modes.

# %%
outputSampleChaos = KLResult.project(outputSample)

# %%
# We limit the sampling size of the Lilliefors selection in order to reduce the computational burden.

# %%
ot.ResourceMap.SetAsUnsignedInteger(
    "FittingTest-LillieforsMaximumSamplingSize", 1)

# %%
process = ot.GaussianProcess(model0, mesh) # get some realizations and a sample ot.RandomGenerator_SetSeed(11111) field1D = process.getRealization() #FIELD BASE ot.RandomGenerator_SetSeed(11111) sample1D = process.getSample(10) #SAMPLE BASE # get the Karhunen Loeve decomposition of the mesh algorithm = ot.KarhunenLoeveP1Algorithm(mesh, model0, 1e-3) algorithm.run() results = algorithm.getResult() #### This is the object we will need ! #now let's project the field and the samples on the eigenmode basis lifter = ot.KarhunenLoeveLifting(results) projecter = ot.KarhunenLoeveProjection(results) coeffField1D = projecter(field1D) coeffSample1D = projecter( sample1D ) #dimension of the coefficents, done internaly by our class but needed for comparison fieldVals = lifter(coeffField1D) sample_lifted = lifter(coeffSample1D) field_lifted = ot.Field(lifter.getOutputMesh(), fieldVals) # Definition of centered normal variable N05 = ot.Normal(0, 5) # Definition of centered normal variable
# Evaluation on a sample sample = [[1.0] * myFunc.getInputDimension()] * 10 print("sample=", sample) print("myFunc(sample)=", myFunc(sample)) # Get the number of calls print("called ", myFunc.getCallsNumber(), " times") # Construction based on a PointToFieldFunction followed by a FieldToPointFunction # Create a KarhunenLoeveResult mesh = ot.IntervalMesher([9]).build(ot.Interval(-1.0, 1.0)) cov1D = ot.AbsoluteExponential([1.0]) algo = ot.KarhunenLoeveP1Algorithm(mesh, cov1D, 0.0) algo.run() result = algo.getResult() # Create a PointToFieldFunction lifting = ot.KarhunenLoeveLifting(result) # Create a FieldToPointFunction projection = ot.KarhunenLoeveProjection(result) # Create an instance myFunc = ot.PointToPointEvaluation(projection, lifting) print("myFunc=", myFunc) # Get the input and output description print("myFunc input description=", myFunc.getInputDescription()) print("myFunc output description=", myFunc.getOutputDescription()) # Get the input and output dimension print("myFunc input dimension=", myFunc.getInputDimension()) print("myFunc output dimension=", myFunc.getOutputDimension()) # Evaluation on a point point = [1.0] * myFunc.getInputDimension() print("point=", point)
# Retrieve the KL decomposition and plot its scaled modes.
KLResult = algo.getResult()
scaledModes = KLResult.getScaledModesAsProcessSample()
graph = scaledModes.drawMarginal(0)
graph.setTitle('Modes de KL, chute visqueuse')
graph.setXTitle(r'$t$')
graph.setYTitle(r'$z$')
otv.View(graph)

# Here we have to suppress the Dirac component: keep only the first four
# marginals and append the constant "0.0" as the fifth model input.
distX = distX.getMarginal(range(4))
alti = ot.PointToFieldConnection(
    alti, ot.SymbolicFunction(["z0", "v0", "m", "c"], ["z0", "v0", "m", "c", "0.0"]))
inputSample = inputSample.getMarginal(range(4))

# Lifting (coefficients -> trajectory) and projection of the output sample
# onto the KL modes.
postProcessing = ot.KarhunenLoeveLifting(KLResult)
outputSampleChaos = KLResult.project(outputSample)

# Validation data drawn from the (reduced) input distribution.
size = 20
validationInputSample = distX.getSample(size)
validationOutputSample = alti(validationInputSample)

# First, using the most basic interface: chaos on the KL coefficients,
# then lift the predicted coefficients back to trajectories.
algo = ot.FunctionalChaosAlgorithm(inputSample, outputSampleChaos)
algo.run()
metaModel = ot.PointToFieldConnection(postProcessing, algo.getResult().getMetaModel())
graph = validationOutputSample.drawMarginal(0)
graph.setColors(['red'])
graph2 = metaModel(validationInputSample).drawMarginal(0)