import openturns as ot

ot.TESTPREAMBLE()

# Left hand side of the composition
left = ot.SymbolicFunction(['x1', 'x2'],
                           ['x1*sin(x2)', 'cos(x1+x2)', '(x2+1)*exp(x1-2*x2)'])

# Right hand side of the composition
right = ot.SymbolicFunction(
    ['x1', 'x2', 'x3', 'x4'],
    ['(x1*x1+x2^3*x1)/(2*x3*x3+x4^4+1)', 'cos(x2*x2+x4)/(x1*x1+1+x3^4)'])

# Composition of left and right
composed = ot.ComposedFunction(left, right)
print("right=", repr(right))
print("left=", repr(left))
print("composed=", repr(composed))

# Does it work?
x = ot.Point(right.getInputDimension(), 1.0)
y = right(x)
z = left(y)
Dy = right.gradient(x)
Dz = left.gradient(y)
print("x=", repr(x), " y=right(x)=", repr(y), " z=left(y)=", repr(z))
print("left(right(x))=", repr(composed(x)))
print("D(right)(x)=", repr(Dy), " D(left)(y)=", repr(Dz))
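# Illustrative addition (not part of the original test): the gradient of the
# composition follows the chain rule. In OpenTURNS, gradient() returns the
# transposed Jacobian, so D(left o right)(x) should equal D(right)(x) * D(left)(y).
Dcomposed = composed.gradient(x)
print("chain rule Dy*Dz=", repr(Dy * Dz))
print("composed.gradient(x)=", repr(Dcomposed))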
import openturns as ot
import otrobopt

# This is calligraphic J, the non-robust objective function
calJ = ot.SymbolicFunction(
    ['x1', 'x2'],
    ['15.0 * (x1^2 + x2^2) - 100.0 * exp(-5. * ((x1 + 1.6)^2+(x2 + 1.6)^2))'])

# This is calligraphic G, the non-robust inequality constraints function
calG = ot.SymbolicFunction(
    ['x1', 'x2'],
    ['(x1 - 0.5)^2 + x2^2 - 4.0', '(x1 + 0.5)^2 + x2^2 - 4.0'])

# This is the perturbation function
noise = ot.SymbolicFunction(['x1', 'x2', 'xi1', 'xi2'],
                            ['x1 + xi1', 'x2 + xi2'])

# This is capital J: J(x,xi) = calJ(x+xi), the parametric objective function
JFull = ot.ComposedFunction(calJ, noise)
J = ot.ParametricFunction(JFull, [2, 3], [0.0] * 2)

# This is g, the parametric constraints
gFull = ot.ComposedFunction(calG, noise)
g = ot.ParametricFunction(gFull, [2, 3], [0.0] * 2)

bounds = ot.Interval([-3.0] * 2, [3.0] * 2)
solver = ot.NLopt('LD_SLSQP')
solver.setMaximumIterationNumber(100)

for sigma_xi in [0.1, 0.2, 0.3, 0.4, 0.5]:
    thetaDist = ot.Normal([0.0] * 2, [sigma_xi] * 2, ot.IdentityMatrix(2))
    robustnessMeasure = otrobopt.MeanMeasure(J, thetaDist)
    reliabilityMeasure = otrobopt.JointChanceMeasure(g, thetaDist, ot.Less(),
import openturns as ot
from openturns.viewer import View

g = ot.SymbolicFunction(['x'], ['sin(x)'])
f = ot.SymbolicFunction(['y'], ['abs(y)'])
composed = ot.ComposedFunction(f, g)
graph = composed.draw(0.0, 10.0)
graph.setTitle('y=abs(sin(x))')
View(graph, figure_kwargs={'figsize': (8, 4)}, add_legend=True).ShowAll()
# value of the constant. The basis is built with the :class:`~openturns.ConstantBasisFactory` class.
basis = ot.ConstantBasisFactory(dimension).build()

# %%
# We build the kriging algorithm by giving it the transformed data, the output data, the covariance
# model and the basis.
algo = ot.KrigingAlgorithm(myTransform(Xtrain), Ytrain, covarianceModel, basis)

# %%
# We can run the algorithm and store the result:
algo.run()
result = algo.getResult()

# %%
# The metamodel is the following :class:`~openturns.ComposedFunction`:
metamodel = ot.ComposedFunction(result.getMetaModel(), myTransform)

# %%
# We can draw the metamodel and the exact model on the same graph.
graph = plot_exact_model()
y_test = metamodel(x_test)
curve = ot.Curve(x_test, y_test)
curve.setLineStyle("dashed")
curve.setColor("red")
graph.add(curve)
graph.setLegends(['exact model', 'training data', 'kriging metamodel'])
graph.setLegendPosition("bottom")
graph.setTitle('1D Kriging : exact model and metamodel')
view = otv.View(graph)

# %%
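# %%
# A minimal sanity check, added here for illustration: without a nugget effect,
# kriging interpolates its design points, so the composed metamodel should
# approximately reproduce the training outputs `Ytrain` at the points `Xtrain`.
residuals = metamodel(Xtrain) - Ytrain
print(residuals)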
# - biased: HSICVStat.
#
estimatorType = ot.HSICUStat()

# We define a distance function for the weights
# For the TSA, the critical domain is [5,+inf].
interval = ot.Interval(5, float('inf'))
g = ot.DistanceToDomainFunction(interval)

stdDev = Y.computeStandardDeviation()[0]
foo = ot.SymbolicFunction(["x", "s"], ["exp(-x/s)"])
g2 = ot.ParametricFunction(foo, [1], [0.1 * stdDev])

# The filter function
filterFunction = ot.ComposedFunction(g2, g)

# We eventually build the HSIC object!
TSA = ot.HSICEstimatorTargetSensitivity(
    covarianceList, X, Y, estimatorType, filterFunction)

# We get the R2-HSIC
R2HSIC = TSA.getR2HSICIndices()
ott.assert_almost_equal(R2HSIC, [0.26863688, 0.00468423, 0.00339962])

# and the HSIC indices
HSICIndices = TSA.getHSICIndices()
ott.assert_almost_equal(HSICIndices, [0.00107494, 0.00001868, 0.00001411])

# We get the asymptotic pvalue
# ot.Log.Show(ot.Log.INFO)
ot.TBB.Disable()

#
# branin

dim = 2

# model
branin = ot.SymbolicFunction(['x1', 'x2'], [
    '((x2-(5.1/(4*pi_^2))*x1^2+5*x1/pi_-6)^2+10*(1-1/8*pi_)*cos(x1)+10-54.8104)/51.9496',
    '0.96'
])
transfo = ot.SymbolicFunction(['u1', 'u2'], ['15*u1-5', '15*u2'])
model = ot.ComposedFunction(branin, transfo)

# problem
problem = ot.OptimizationProblem()
problem.setObjective(model)
bounds = ot.Interval([0.0] * dim, [1.0] * dim)
problem.setBounds(bounds)

# design
experiment = ot.Box([1, 1])
inputSample = experiment.generate()
modelEval = model(inputSample)
outputSample = modelEval.getMarginal(0)

# first kriging model
covarianceModel = ot.SquaredExponential([0.3007, 0.2483], [0.981959])
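# Illustrative check (not in the original script): by construction of
# ComposedFunction, evaluating the composed model on the unit square is the
# same as rescaling the point to the physical domain and evaluating branin.
u = [0.5, 0.5]
print(model(u))
print(branin(transfo(u)))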
# %%
# Compare the instrumental density to the target density.
graph = f.draw(lower_bound, upper_bound, 100)
graph.setTitle("Instrumental PDF")
graph.setXTitle("")
graph.setYTitle("")
graph.add(instrumentalDistribution.drawPDF(lower_bound, upper_bound, 100))
graph.setLegendPosition("topright")
graph.setLegends(["Unnormalized target density", "Instrumental PDF"])
_ = View(graph)

# %%
# :class:`~openturns.MetropolisHastings` and derived classes can work directly with the
# logarithm of the target density.
log_density = ot.ComposedFunction(ot.SymbolicFunction("x", "log(x)"), f)

# %%
# In this case, it is easier to directly write it as a :class:`~openturns.SymbolicFunction`.
log_density = ot.SymbolicFunction(
    "x", "log(2 + sin(x)^2) - (2 + cos(3*x)^3 + sin(2*x)^3) * x")

initialState = ot.Point([3.0])  # not important in this case
support = ot.Interval([lower_bound], [upper_bound])
independentMH = ot.IndependentMetropolisHastings(log_density, support, initialState,
                                                 instrumentalDistribution, [0])

# %%
# Get a sample
Create a composed function
==========================
"""
# %%
# In this example we are going to create a composed function :math:`f\circ g`
#

# %%
from __future__ import print_function
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt
import math as m
ot.Log.Show(ot.Log.NONE)

# %%
# assume f, g functions
g = ot.SymbolicFunction(['x1', 'x2'],
                        ['x1 + x2', '3 * x1 * x2'])
f = ot.SymbolicFunction(['x1', 'x2'], ['2 * x1 - x2'])

# %%
# create the composed function
function = ot.ComposedFunction(f, g)

# %%
# evaluate the function
x = [3.0, 4.0]
y = function(x)
print('x=', x, 'y=', y)
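# %%
# As a minimal check (added here for illustration), the composition can also be
# evaluated by chaining the two functions by hand; both results should match.
print('f(g(x))=', f(g(x)))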
# - the 2D-transform :math:`T` ;
# - the 1D-transform :math:`T_1` and the second component unchanged ;
#
# and observe the results are the same.
zi1D = [transformX1([xi[0]])[0], xi[1]]
zi2D = transformation(xi)
print("zi = ", zi)
print("zi1D = ", zi1D)
print("zi2D = ", zi2D)

# %%
# We can represent the boundary of the event in the standard space: that is a composition of the
# hyperbola :math:`h : x \mapsto 10/x` and the inverse transform :math:`T_1^{-1}` defined by
# :math:`inverseTransformX1`.
failureBoundaryPhysicalSpace = ot.SymbolicFunction(['x'], ['10.0 / x'])
failureBoundaryStandardSpace = ot.ComposedFunction(
    failureBoundaryPhysicalSpace, inverseTransformX1)
x = np.linspace(1.1, 5.0, 100)
cx = np.array([failureBoundaryStandardSpace([xi])[0] for xi in x])

graphStandardSpace = ot.Graph('Failure event in the standard space', r'$u_1$', r'$u_2$', True, '')
curveCX = ot.Curve(x, cx, r'Boundary of the event $\partial \mathcal{D}$')
curveCX.setLineStyle("solid")
curveCX.setColor("blue")
graphStandardSpace.add(curveCX)

# %%
# We add the origin to the previous graph.
cloud = ot.Cloud([0.0], [0.0])
cloud.setColor("black")
cloud.setPointStyle("fcircle")
def run(self):
    """
    Launch the algorithm and build the POD models.

    Notes
    -----
    This method launches the iterative algorithm. First the censored data
    are filtered if needed. The Box Cox transformation is performed if it is
    enabled. Then the enrichment of the design of experiments is performed.
    Once the algorithm stops, it builds the POD models: conditional samples are
    simulated for each defect size, then the distributions of the probability
    estimator (for MC simulation) are built. Eventually, a sample of this
    distribution is used to compute the mean POD and the POD at the confidence
    level.
    """

    # Create an initial uniform distribution if not given
    if self._distribution is None:
        inputMin = self._input.getMin()
        inputMin[0] = np.min(self._defectSizes)
        inputMax = self._input.getMax()
        inputMax[0] = np.max(self._defectSizes)
        marginals = [ot.Uniform(inputMin[i], inputMax[i]) for i in range(self._dim)]
        self._distribution = ot.ComposedDistribution(marginals)

    # Create the design of experiments of the candidate points where the
    # criterion is computed
    if self._distribution.hasIndependentCopula():
        # without copula use low discrepancy experiment as first doe
        doeCandidate = ot.LowDiscrepancyExperiment(ot.SobolSequence(),
                                                   self._distribution,
                                                   self._candidateSize).generate()
    else:
        # else simple Monte Carlo distribution
        doeCandidate = self._distribution.getSample(self._candidateSize)

    # build initial kriging model
    # build the kriging model without optimization
    algoKriging, transformation = self._buildKrigingAlgo(self._input, self._signals)
    if self._verbose:
        print('Building the kriging model')
        print('Optimization of the covariance model parameters...')

    llDim = algoKriging.getReducedLogLikelihoodFunction().getInputDimension()
    lowerBound = [0.001] * llDim
    upperBound = [50] * llDim
    algoKriging = self._estimKrigingTheta(algoKriging, lowerBound, upperBound,
                                          self._initialStartSize)
    algoKriging.run()

    # Get kriging results
    self._krigingResult = algoKriging.getResult()
    self._covarianceModel = self._krigingResult.getCovarianceModel()
    self._basis = self._krigingResult.getBasisCollection()
    metamodel = ot.ComposedFunction(self._krigingResult.getMetaModel(), transformation)

    self._Q2 = self._computeQ2(self._input, self._signals,
                               self._krigingResult, transformation)
    if self._verbose:
        print('Kriging validation Q2 (>0.9): {:0.4f}\n'.format(self._Q2))

    plt.ion()
    # Start the improvement loop
    iteration = 0
    while iteration < self._nIteration:
        iteration += 1
        if self._verbose:
            print('Iteration : {}/{}'.format(iteration, self._nIteration))

        # compute POD (ptrue = pn-1) for bias reducing in the criterion
        # Monte Carlo for all defect sizes in a vectorized way.
        # get Sample for all parameters except the defect size
        samplePred = self._distribution.getSample(self._samplingSize)[:, 1:]
        fullSamplePred = ot.Sample(self._samplingSize * self._defectNumber, self._dim)
        # Add the defect sizes as first value
        for i, defect in enumerate(self._defectSizes):
            fullSamplePred[self._samplingSize * i:self._samplingSize * (i + 1), :] = \
                self._mergeDefectInX(defect, samplePred)
        meanPredictionSample = metamodel(fullSamplePred)
        meanPredictionSample = np.reshape(meanPredictionSample,
                                          (self._samplingSize, self._defectNumber), 'F')
        # compute the POD for all defect sizes
        currentPOD = np.mean(meanPredictionSample > self._detectionBoxCox, axis=0)

        # Compute criterion for all candidates in the candidate doe
        criterion = 1000000000
        for icand, candidate in enumerate(doeCandidate):

            # add the current candidate to the kriging doe
            inputAugmented = self._input[:]
            inputAugmented.add(candidate)
            signalsAugmented = self._signals[:]
            # predict the signal value of the candidate using the current
            # kriging model
            signalsAugmented.add(metamodel(candidate))
            # create a temporary kriging model with the new doe and without
            # updating the covariance model parameters
            # normalization
            mean = inputAugmented.computeMean()
            try:
                stddev = inputAugmented.computeStandardDeviation()
            except AttributeError:
                stddev = inputAugmented.computeStandardDeviationPerComponent()
            linear = ot.SquareMatrix(self._dim)
            for j in range(self._dim):
                linear[j, j] = 1.0 / stddev[j] if abs(stddev[j]) > 1e-12 else 1.0
            zero = [0.0] * self._dim
            transformation = ot.LinearFunction(mean, zero, linear)
            algoKrigingTemp = ot.KrigingAlgorithm(transformation(inputAugmented),
                                                  signalsAugmented,
                                                  self._covarianceModel, self._basis)
            optimizer = algoKrigingTemp.getOptimizationAlgorithm()
            optimizer.setMaximumIterationNumber(0)
            algoKrigingTemp.setOptimizationAlgorithm(optimizer)
            algoKrigingTemp.run()
            krigingResultTemp = algoKrigingTemp.getResult()

            # compute the criterion for all defect size
            crit = []
            # save results, used to compute the PODModel and PODCLModel
            PODPerDefect = ot.Sample(self._simulationSize * self._samplingSize,
                                     self._defectNumber)
            for idef, defect in enumerate(self._defectSizes):
                podSample = self._computePODSamplePerDefect(defect,
                                                            self._detectionBoxCox,
                                                            krigingResultTemp,
                                                            transformation,
                                                            self._distribution,
                                                            self._simulationSize,
                                                            self._samplingSize)
                PODPerDefect[:, idef] = podSample
                meanPOD = podSample.computeMean()[0]
                varPOD = podSample.computeVariance()[0]
                crit.append(varPOD + (meanPOD - currentPOD[idef])**2)
            # compute the criterion aggregated for all defect sizes
            newCriterion = np.sqrt(np.mean(crit))

            # check if the result is better or not
            if newCriterion < criterion:
                self._PODPerDefect = PODPerDefect
                criterion = newCriterion
                indexOpt = icand

            if self._verbose:
                updateProgress(icand, int(doeCandidate.getSize()), 'Computing criterion')

        # get the best candidate
        candidateOpt = doeCandidate[indexOpt]
        # add new point to DOE
        self._input.add(candidateOpt)
        # add the signal computed by the physical model
        if self._boxCox:
            self._signals.add(self._boxCoxTransform(self._physicalModel(candidateOpt) +
                                                    [self._shift]))
        else:
            self._signals.add(self._physicalModel(candidateOpt))
        # remove added candidate from the doeCandidate
        doeCandidate.erase(indexOpt)
        if self._verbose:
            print('Criterion value : {:0.4f}'.format(criterion))
            print('Added point : {}'.format(candidateOpt))
            print('Update the kriging model')

        # update the kriging model without optimization
        algoKriging, transformation = self._buildKrigingAlgo(self._input, self._signals)
        algoKriging.setOptimizeParameters(False)
        algoKriging.run()

        self._Q2 = self._computeQ2(self._input, self._signals,
                                   algoKriging.getResult(), transformation)

        # Check the quality of the kriging model if it needs optimization
        if self._Q2 < 0.95:
            if self._verbose:
                print('Optimization of the covariance model parameters...')
            algoKriging.setOptimizeParameters(True)
            algoKriging = self._estimKrigingTheta(algoKriging, lowerBound, upperBound,
                                                  self._initialStartSize)
            algoKriging.run()

        # Get kriging results
        self._krigingResult = algoKriging.getResult()
        self._covarianceModel = self._krigingResult.getCovarianceModel()
        self._basis = self._krigingResult.getBasisCollection()

        self._Q2 = self._computeQ2(self._input, self._signals,
                                   self._krigingResult, transformation)
        if self._verbose:
            print('Kriging validation Q2 (>0.9): {:0.4f}'.format(self._Q2))

        if self._graph:
            # create the interpolate function of the POD model
            meanPOD = self._PODPerDefect.computeMean()
            interpModel = interp1d(self._defectSizes, np.array(meanPOD), kind='linear')
            self._PODmodel = ot.PythonFunction(1, 1, interpModel)
            # The POD at confidence level is built in getPODCLModel() directly
            fig, ax = self.drawPOD(self._probabilityLevel, self._confidenceLevel)
            plt.draw()
            plt.pause(0.001)
            plt.show()
            if self._graphDirectory is not None:
                if not os.path.exists(self._graphDirectory):
                    os.makedirs(self._graphDirectory)
                fig.savefig(os.path.join(self._graphDirectory, 'AdaptiveSignalPOD_') + str(iteration),
                            bbox_inches='tight', transparent=True)

    # Compute the final POD with the last updated kriging model
    if self._verbose:
        print('\nStart computing the POD with the last updated kriging model')

    # compute the sample containing the POD values for all defect
    self._PODPerDefect = ot.Sample(self._simulationSize * self._samplingSize,
                                   self._defectNumber)
    for i, defect in enumerate(self._defectSizes):
        self._PODPerDefect[:, i] = self._computePODSamplePerDefect(defect,
                                                                   self._detectionBoxCox,
                                                                   self._krigingResult,
                                                                   transformation,
                                                                   self._distribution,
                                                                   self._simulationSize,
                                                                   self._samplingSize)
        if self._verbose:
            updateProgress(i, self._defectNumber, 'Computing POD per defect')

    # compute the mean POD
    meanPOD = self._PODPerDefect.computeMean()
    # create the interpolate function of the POD model
    interpModel = interp1d(self._defectSizes, np.array(meanPOD), kind='linear')
    self._PODmodel = ot.PythonFunction(1, 1, interpModel)
    # The POD at confidence level is built in getPODCLModel() directly

    # remove the interactive plotting
    plt.ioff()
# - unbiased: HSICUStat (not available here!!);
# - biased: HSICVStat.
#
estimatorType = ot.HSICVStat()

# We define a distance function for the weights
# For the CSA, the critical domain is [5,+inf].
interval = ot.Interval(5, float('inf'))
g = ot.DistanceToDomainFunction(interval)

stdDev = Y.computeStandardDeviation()[0]
foo = ot.SymbolicFunction(["x", "s"], ["exp(-x/s)"])
g2 = ot.ParametricFunction(foo, [1], [0.1 * stdDev])

# The weight function
weight = ot.ComposedFunction(g2, g)

# We eventually build the HSIC object
CSA = ot.HSICEstimatorConditionalSensitivity(covarianceModelCollection, X, Y,
                                             estimatorType, weight)

# We get the R2-HSIC
R2HSIC = CSA.getR2HSICIndices()
ott.assert_almost_equal(R2HSIC, [0.03717355, 0.00524130, 0.23551919])

# and the HSIC indices
HSICIndices = CSA.getHSICIndices()
ott.assert_almost_equal(HSICIndices, [0.00064033, 0.00025769, 0.01105157])

# We set the number of permutations for the pvalue estimate
b = 100
def run(self):
    """
    Build the POD models.

    Notes
    -----
    This method builds the polynomial chaos model. First the censored data
    are filtered if needed. The Box Cox transformation is performed if it is
    enabled. Then it builds the POD models: the Monte Carlo simulation is
    performed for each given defect size. The confidence interval is computed
    by simulating new coefficients of the polynomial chaos, then Monte Carlo
    simulations are performed.
    """

    # run the chaos algorithm and get result if not given
    if not self._userChaos:
        if self._verbose:
            print('Start build polynomial chaos model...')
        self._algoChaos = self._buildChaosAlgo(self._input, self._signals)
        self._algoChaos.run()
        if self._verbose:
            print('Polynomial chaos model completed')
        self._chaosResult = self._algoChaos.getResult()

    # get the metamodel
    self._chaosPred = self._chaosResult.getMetaModel()
    # get the basis, coef and transformation, needed for the confidence interval
    self._chaosCoefs = self._chaosResult.getCoefficients()
    self._reducedBasis = self._chaosResult.getReducedBasis()
    self._transformation = self._chaosResult.getTransformation()
    self._basisFunction = ot.ComposedFunction(ot.AggregatedFunction(self._reducedBasis),
                                              self._transformation)

    # compute the residuals and stderr
    inputSize = self._input.getSize()
    basisSize = self._reducedBasis.getSize()
    self._residuals = self._signals - self._chaosPred(self._input)  # residuals
    self._stderr = np.sqrt(np.sum(np.array(self._residuals)**2) /
                           (inputSize - basisSize - 1))

    # Check the quality of the chaos model
    R2 = self.getR2()
    Q2 = self.getQ2()
    if self._verbose:
        print('Polynomial chaos validation R2 (>0.8) : {:0.4f}'.format(R2))
        print('Polynomial chaos validation Q2 (>0.8) : {:0.4f}'.format(Q2))

    # Compute the POD values for each defect size
    self.POD = self._computePOD(self._defectSizes, self._chaosCoefs)

    # create the interpolate function
    interpModel = interp1d(self._defectSizes, self.POD, kind='linear')
    self._PODmodel = ot.PythonFunction(1, 1, interpModel)

    ####################### confidence interval ############################
    dof = inputSize - basisSize - 1
    varEpsilon = (ot.ChiSquare(dof).inverse() * dof * self._stderr**2).getRealization()[0]
    gramBasis = ot.Matrix(self._basisFunction(self._input)).computeGram()
    covMatrix = gramBasis.solveLinearSystem(ot.IdentityMatrix(basisSize)) * varEpsilon
    self._coefsDist = ot.Normal(np.hstack(self._chaosCoefs),
                                ot.CovarianceMatrix(covMatrix.getImplementation()))
    coefsRandom = self._coefsDist.getSample(self._simulationSize)

    self._PODPerDefect = ot.Sample(self._simulationSize, self._defectNumber)
    for i, coefs in enumerate(coefsRandom):
        self._PODPerDefect[i, :] = self._computePOD(self._defectSizes, coefs)
        if self._verbose:
            updateProgress(i, self._simulationSize, 'Computing POD per defect')
# %%
# We have access to the distance to this domain thanks to the
# :class:`~openturns.DistanceToDomainFunction` class.
dist2criticalDomain = ot.DistanceToDomainFunction(criticalDomain)

# %%
# We define the parameters in our function from the output sample
s = 0.1 * Y.computeStandardDeviation()[0]

# %%
# We now define our filter function by composition of the parametrized function and
# the distance function.
f = ot.SymbolicFunction(["x", "s"], ["exp(-x/s)"])
phi = ot.ParametricFunction(f, [1], [s])
filterFunction = ot.ComposedFunction(phi, dist2criticalDomain)

# %%
# We choose an unbiased estimator
estimatorType = ot.HSICUStat()

# %%
# and build the HSIC estimator
targetHSIC = ot.HSICEstimatorTargetSensitivity(covarianceModelCollection, X, Y,
                                               estimatorType, filterFunction)

# %%
# We get the R2-HSIC indices:
R2HSICIndices = targetHSIC.getR2HSICIndices()
print("\n Target HSIC analysis")
print("R2-HSIC Indices: ", R2HSICIndices)
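# %%
# Illustrative behaviour of the filter function (added for clarity, assuming the
# critical domain :math:`[5, +\infty[` defined above): an output inside the domain
# gets a weight of 1, an output far below the threshold gets a weight close to 0.
print(filterFunction([[6.0], [4.9], [0.0]]))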