# --- Register distribution-parameter objects in the Study ---
# NOTE(review): `ot` (openturns) and `myStudy` are defined earlier in this
# script, outside this excerpt; this section only adds objects to the study
# and then persists it.

# ArcsineMuSigma parameter save
ams_parameters = ot.ArcsineMuSigma(8.4, 2.25)
myStudy.add('ams_parameters', ams_parameters)
# BetaMuSigma parameter save
bms_parameters = ot.BetaMuSigma(0.2, 0.6, -1, 2)
myStudy.add('bms_parameters', bms_parameters)
# GammaMuSigma parameter save
gmms_parameters = ot.GammaMuSigma(1.5, 2.5, -0.5)
myStudy.add('gmms_parameters', gmms_parameters)
# GumbelMuSigma parameter save
gms_parameters = ot.GumbelMuSigma(1.5, 1.3)
myStudy.add('gms_parameters', gms_parameters)
# LogNormalMuSigma parameter save
lnms_parameters = ot.LogNormalMuSigma(30000.0, 9000.0, 15000)
myStudy.add('lnms_parameters', lnms_parameters)
# LogNormalMuSigmaOverMu parameter save
lnmsm_parameters = ot.LogNormalMuSigmaOverMu(0.63, 5.24, -0.5)
myStudy.add('lnmsm_parameters', lnmsm_parameters)
# WeibullMinMuSigma parameter save
wms_parameters = ot.WeibullMinMuSigma(1.3, 1.23, -0.5)
myStudy.add('wms_parameters', wms_parameters)
# MemoizeFunction: wrap a symbolic function so evaluations are cached,
# and evaluate it once so the saved object carries a non-empty cache.
f = ot.SymbolicFunction(['x1', 'x2'], ['x1*x2'])
memoize = ot.MemoizeFunction(f)
memoize([5, 6])
myStudy.add('memoize', memoize)
# print ('Study = ' , myStudy)
# Persist the study to its backing storage.
myStudy.save()

# Create a new Study Object
#! /usr/bin/env python
import openturns as ot

# Exercise every supported DistributionParameters class: print it, round-trip
# its non-native values through the native parameterization, and build the
# corresponding distribution.
distParams = [
    ot.ArcsineMuSigma(8.4, 2.25),
    ot.BetaMuSigma(0.2, 0.6, -1, 2),
    ot.GammaMuSigma(1.5, 2.5, -0.5),
    ot.GumbelLambdaGamma(0.6, 6.0),
    ot.GumbelMuSigma(1.5, 1.3),
    ot.LogNormalMuErrorFactor(0.63, 1.5, -0.5),
    ot.LogNormalMuSigma(0.63, 3.3, -0.5),
    ot.LogNormalMuSigmaOverMu(0.63, 5.24, -0.5),
    ot.WeibullMaxMuSigma(1.3, 1.23, 3.1),
    ot.WeibullMinMuSigma(1.3, 1.23, -0.5),
]

for distParam in distParams:
    print('Distribution Parameters ', repr(distParam))
    print('Distribution Parameters ', distParam)

    # Round-trip: non-native values -> native values -> non-native values.
    non_native = distParam.getValues()
    desc = distParam.getDescription()
    print('non-native=', non_native, desc)
    native = distParam.evaluate()
    print('native=', native)
    non_native = distParam.inverse(native)
    print('non-native=', non_native)

    # Build the distribution associated with these parameters.
    print('built dist=', distParam.getDistribution())

    # derivative of the native parameters with regards the parameters of the
#!/usr/bin/env python
from __future__ import print_function
import openturns as ot
import otmorris

# Beam deflection model ("poutre"): y = F L^3 / (48 E I), with I = b h^3 / 12.
poutre = ot.SymbolicFunction(
    ['L', 'b', 'h', 'E', 'F'],
    ['F * L^3 / (48 * E * b * h^3 / 12)'])

# Input marginals: log-normal distributions given as (mu, sigma/mu).
L, b, h, E, F = [
    ot.ParametrizedDistribution(ot.LogNormalMuSigmaOverMu(mu, sigma_over_mu))
    for mu, sigma_over_mu in [(5., 0.02), (2., 0.05), (0.4, 0.05),
                              (3e4, 0.12), (0.1, 0.20)]
]
list_marginals = [L, b, h, E, F]
distribution = ot.ComposedDistribution(list_marginals)
distribution.setDescription(('L', 'b', 'h', 'E', 'F'))
dim = distribution.getDimension()

# Morris screening design: 4 levels per input, 10 trajectories,
# and a jump step of half the number of levels.
level_number = 4
trajectories = 10
jump_step = int(level_number / 2)
levels = [level_number] * dim

# Grid bounds: central 98% range of each marginal.
bound = ot.Interval(
    [marginal.computeQuantile(0.01)[0] for marginal in list_marginals],
    [marginal.computeQuantile(0.99)[0] for marginal in list_marginals])
experiment = otmorris.MorrisExperimentGrid(levels, bound, trajectories)
experiment.setJumpStep(ot.Indices([jump_step] * dim))
ref_sampleSize, ref_nrepetitions)
# NOTE(review): the fragment above closes a runConvergence(...) call that
# begins before this excerpt; runConvergence, model_ishigami,
# distribution_ishigami, the *SensitivityAlgorithm classes, list_sampleSize,
# ref_sampleSize and ref_nrepetitions are all defined outside this view.

# Martinez
runConvergence(model_ishigami, distribution_ishigami,
               MartinezSensitivityAlgorithm, list_sampleSize,
               ref_sampleSize, ref_nrepetitions)

################################################################################
#################                    POUTRE                    #################
################################################################################
# Beam deflection model: y = F L^3 / (48 E I), with I = b h^3 / 12.
model_poutre = ot.SymbolicFunction(['L', 'b', 'h', 'E', 'F'],
                                   ['F * L^3 / (48 * E * b * h^3 / 12)'])
model_poutre.setName("poutre")
# Input marginals: LogNormal distributions set through the
# (mu, sigma/mu, gamma) parameterization.
L = ot.LogNormal()
L.setParameter(ot.LogNormalMuSigmaOverMu()([5., .02, 0.]))
b = ot.LogNormal()
# NOTE(review): mu=.2 here, while a sibling script in this file uses mu=2.0
# for `b` — confirm which value is intended.
b.setParameter(ot.LogNormalMuSigmaOverMu()([.2, .05, 0.]))
h = ot.LogNormal()
h.setParameter(ot.LogNormalMuSigmaOverMu()([.4, .05, 0.]))
E = ot.LogNormal()
E.setParameter(ot.LogNormalMuSigmaOverMu()([3e4, .12, 0.]))
F = ot.LogNormal()
F.setParameter(ot.LogNormalMuSigmaOverMu()([.1, .20, 0.]))
distribution_poutre = ot.ComposedDistribution([L, b, h, E, F])

# Saltelli
runConvergence(model_poutre, distribution_poutre,
               SaltelliSensitivityAlgorithm, list_sampleSize,
               ref_sampleSize, ref_nrepetitions)
# Martinez runConvergence( model_ishigami, distribution_ishigami, MartinezSensitivityAlgorithm, list_sampleSize, ) # POUTRE model_poutre = ot.SymbolicFunction( ["L", "b", "h", "E", "F"], ["F * L^3 / (48 * E * b * h^3 / 12)"] ) model_poutre.setName("poutre") L = ot.LogNormal() L.setParameter(ot.LogNormalMuSigmaOverMu()([5.0, 0.02, 0.0])) b = ot.LogNormal() b.setParameter(ot.LogNormalMuSigmaOverMu()([0.2, 0.05, 0.0])) h = ot.LogNormal() h.setParameter(ot.LogNormalMuSigmaOverMu()([0.4, 0.05, 0.0])) E = ot.LogNormal() E.setParameter(ot.LogNormalMuSigmaOverMu()([3e4, 0.12, 0.0])) F = ot.LogNormal() F.setParameter(ot.LogNormalMuSigmaOverMu()([0.1, 0.20, 0.0])) distribution_poutre = ot.ComposedDistribution([L, b, h, E, F]) # Saltelli runConvergence( model_poutre, distribution_poutre, SaltelliSensitivityAlgorithm,