Example #1
def computeReferenceStandardDeviation(model, distribution, sobol_estimator,
                                      ref_sampleSize, ref_nrepetitions):
    # # Get the asymptotic variance with 10000 samples and 1000 repetitions as reference
    # sensitivity_test = SensitivityConfidenceTest(model, distribution,
    #                                              sobol_estimator,
    #                                              sampleSize=ref_sampleSize,
    #                                              nrepetitions=ref_nrepetitions)
    # std_first_empirical = sensitivity_test.std_first_empirical
    # std_total_empirical = sensitivity_test.std_total_empirical
    # return std_first_empirical, std_total_empirical
    sobolexperiment = ot.SobolIndicesExperiment(distribution,
                                                int(ref_sampleSize), False)
    inputDesign = sobolexperiment.generate()
    outputDesign = model(inputDesign)
    sensitivity_algorithm = sobol_estimator(inputDesign, outputDesign,
                                            int(ref_sampleSize))

    # Retrieve the asymptotic distributions
    distFirstCol = sensitivity_algorithm.getFirstOrderAsymptoticDistribution()
    distTotalCol = sensitivity_algorithm.getTotalOrderAsymptoticDistribution()
    std_first_asymptotic = ot.Point(
        [d.getStandardDeviation()[0] for d in distFirstCol])
    std_total_asymptotic = ot.Point(
        [d.getStandardDeviation()[0] for d in distTotalCol])
    return std_first_asymptotic, std_total_asymptotic
Example #2
def computeErrors(model, distribution, sobol_estimator, sampleSize, ref_std_fo,
                  ref_std_to):

    sobolexperiment = ot.SobolIndicesExperiment(distribution, int(sampleSize),
                                                False)
    inputDesign = sobolexperiment.generate()
    outputDesign = model(inputDesign)
    sensitivity_algorithm = sobol_estimator(inputDesign, outputDesign,
                                            int(sampleSize))

    # Retrieve the asymptotic distributions
    distFirstCol = sensitivity_algorithm.getFirstOrderAsymptoticDistribution()
    distTotalCol = sensitivity_algorithm.getTotalOrderAsymptoticDistribution()
    std_first_asymptotic = ot.Point(
        [d.getStandardDeviation()[0] for d in distFirstCol])
    std_total_asymptotic = ot.Point(
        [d.getStandardDeviation()[0] for d in distTotalCol])

    # Compute absolute errors
    AE_vfo = np.abs(ref_std_fo - std_first_asymptotic)
    AE_vto = np.abs(ref_std_to - std_total_asymptotic)
    # Compute relative errors
    RE_vfo = AE_vfo / ref_std_fo
    RE_vto = AE_vto / ref_std_to

    absErrFirst = np.max(AE_vfo)
    absErrTotal = np.max(AE_vto)
    relErrFirst = np.max(RE_vfo)
    relErrTotal = np.max(RE_vto)
    return absErrFirst, absErrTotal, relErrFirst, relErrTotal
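
The two helpers above can be chained into a simple convergence study; a minimal sketch (the model, estimator and sample sizes below are assumptions chosen for illustration):

import numpy as np
import openturns as ot

model = ot.SymbolicFunction(['x1', 'x2', 'x3'],
                            ['sin(x1) + 7 * sin(x2)^2 + 0.1 * x3^4 * sin(x1)'])
distribution = ot.ComposedDistribution([ot.Uniform(-3.14159, 3.14159)] * 3)
# Reference standard deviation from a large design
ref_std_fo, ref_std_to = computeReferenceStandardDeviation(
    model, distribution, ot.SaltelliSensitivityAlgorithm, 10000, 1000)
# Errors of the asymptotic standard deviation for increasing sample sizes
for sampleSize in [100, 400, 1600]:
    print(sampleSize,
          computeErrors(model, distribution, ot.SaltelliSensitivityAlgorithm,
                        sampleSize, ref_std_fo, ref_std_to))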
def myMauntzKucherenkoSobolEstimator(distribution, size, model):
    inputDesign = ot.SobolIndicesExperiment(distribution, size,
                                            True).generate()
    outputDesign = model(inputDesign)
    sensitivityAnalysis = ot.MauntzKucherenkoSensitivityAlgorithm(
        inputDesign, outputDesign, size)
    return sensitivityAnalysis
def mySaltelliSamplingMethodLHSIndicesExperiment(distribution, size, model):
    ot.ResourceMap.SetAsString('SobolIndicesExperiment-SamplingMethod', 'LHS')
    inputDesign = ot.SobolIndicesExperiment(distribution, size,
                                            True).generate()
    outputDesign = model(inputDesign)
    sensitivity_algorithm = ot.SaltelliSensitivityAlgorithm(
        inputDesign, outputDesign, size)
    return sensitivity_algorithm
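
Both factory functions above return a ready-to-query sensitivity algorithm; a minimal usage sketch (the model, distribution and size are assumptions chosen for illustration):

import openturns as ot

model = ot.SymbolicFunction(['x1', 'x2', 'x3'], ['x1 + 2 * x2 + 3 * x3'])
distribution = ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * 3)
# MauntzKucherenko estimator with the default sampling method
algo_mk = myMauntzKucherenkoSobolEstimator(distribution, 1000, model)
print(algo_mk.getFirstOrderIndices())
# Saltelli estimator with the LHS sampling method set through ResourceMap
algo_lhs = mySaltelliSamplingMethodLHSIndicesExperiment(distribution, 1000, model)
print(algo_lhs.getFirstOrderIndices())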
Example #5
    def run(self):
        """
        Compute the Sobol' indices with the chosen algorithm.
        """

        # create the Function which computes the POD for a given
        # realization and for all defect sizes.
        if self._podType == "kriging":
            self._PODaggr = ot.Function(
                PODaggrKriging(self._POD, self._dim, self._defectSizes,
                               self._detectionBoxCox))
        elif self._podType == "chaos":
            self._PODaggr = ot.Function(
                PODaggrChaos(self._POD, self._dim, self._defectSizes,
                             self._detectionBoxCox, self._simulationSize))

        input_design = ot.SobolIndicesExperiment(self._distribution, self._N,
                                                 False).generate()
        output_design = self._PODaggr(input_design)

        # Remove the output marginals whose variance is null because they cause
        # a failure. The associated defect sizes must also be removed.
        selected_marginal_index = []
        for index_output in range(output_design.getDimension()):
            if output_design.getMarginal(
                    index_output)[:self._N].computeCovariance()[0, 0] != 0:
                selected_marginal_index.append(index_output)

        if len(selected_marginal_index) != output_design.getDimension():
            self.setDefectSizes(self._defectSizes[selected_marginal_index])
            selected_output_design = np.array(
                output_design)[:, selected_marginal_index]

            logging.warning("Some output variances are null. Only the "
                            "following defect sizes are taken into "
                            "account: {}".format(self._defectSizes))
        else:
            selected_output_design = output_design

        if self._method == "Saltelli":
            self._sa = ot.SaltelliSensitivityAlgorithm(input_design,
                                                       selected_output_design,
                                                       self._N)
        elif self._method == "Martinez":
            self._sa = ot.MartinezSensitivityAlgorithm(input_design,
                                                       selected_output_design,
                                                       self._N)
        elif self._method == "Jansen":
            self._sa = ot.JansenSensitivityAlgorithm(input_design,
                                                     selected_output_design,
                                                     self._N)
        elif self._method == "MauntzKucherenko":
            self._sa = ot.MauntzKucherenkoSensitivityAlgorithm(
                input_design, selected_output_design, self._N)

        self._sa.setUseAsymptoticDistribution(True)
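
Once run() has completed, the algorithm stored in self._sa exposes the usual OpenTURNS accessors; a minimal sketch of reading the results back (sa below stands for that stored algorithm and is an assumption of the sketch):

# sa is assumed to be the SobolIndicesAlgorithm stored in self._sa after run()
fo = sa.getAggregatedFirstOrderIndices()
to = sa.getAggregatedTotalOrderIndices()
fo_interval = sa.getFirstOrderIndicesInterval()
to_interval = sa.getTotalOrderIndicesInterval()
print(fo, to, fo_interval, to_interval)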
Example #6
    def compute_sample_indices(self):

        # First order index estimates
        sampleFirst = zeros((self.nrepetitions, self.dim))

        # Total order index estimates
        sampleTotal = zeros((self.nrepetitions, self.dim))

        # Asymptotic distributions
        distFirstCol = [None] * self.nrepetitions
        distTotalCol = [None] * self.nrepetitions

        # set seed of the random generator
        ot.RandomGenerator.SetSeed(self.seed)
        for i in range(self.nrepetitions):
            sobolexperiment = ot.SobolIndicesExperiment(
                self.distribution, int(self.sampleSize), False)
            inputDesign = sobolexperiment.generate()
            outputDesign = self.model(inputDesign)
            self.sensitivity_algorithm = self.sobol_estimator(
                inputDesign, outputDesign, int(self.sampleSize))
            # self.sensitivity_algorithm = self.sobol_estimator(self.distribution,
            #                             int(self.sampleSize), self.model)
            self.sensitivity_algorithm.setConfidenceLevel(self.alpha)
            fo = self.sensitivity_algorithm.getAggregatedFirstOrderIndices()
            to = self.sensitivity_algorithm.getAggregatedTotalOrderIndices()
            # Retrieve the asymptotic distributions
            distFirstCol[i] = \
                self.sensitivity_algorithm.getFirstOrderAsymptoticDistribution()
            distTotalCol[i] = \
                self.sensitivity_algorithm.getTotalOrderAsymptoticDistribution()
            for j in range(self.dim):
                sampleFirst[i, j] = fo[j]
            for j in range(self.dim):
                sampleTotal[i, j] = to[j]

        # Retrieve the bootstrap confidence interval for the last sample
        foInterval = self.sensitivity_algorithm.getFirstOrderIndicesInterval()
        toInterval = self.sensitivity_algorithm.getTotalOrderIndicesInterval()

        # Compute the empirical standard deviation
        self.std_first_empirical = ot.Sample(
            sampleFirst).computeStandardDeviation()
        self.std_total_empirical = ot.Sample(
            sampleTotal).computeStandardDeviation()
        return (
            sampleFirst,
            sampleTotal,
            foInterval,
            toInterval,
            distFirstCol,
            distTotalCol,
        )
    def test_GaussianSum2(self):
        mu = [0.0] * 4
        sigma = [1.0, 2.0, 3.0, 4.0]
        a = [0.0, 1.0, 1.0, 1.0, 1.0]
        problem = otb.GaussianSumSensitivity(a, mu, sigma)
        distribution = problem.getInputDistribution()
        model = problem.getFunction()

        # Create X/Y data
        ot.RandomGenerator.SetSeed(0)
        size = 10000
        inputDesign = ot.SobolIndicesExperiment(distribution, size,
                                                True).generate()
        outputDesign = model(inputDesign)

        # Compute first order indices using the Saltelli estimator
        sensitivityAnalysis = ot.SaltelliSensitivityAlgorithm(
            inputDesign, outputDesign, size)
        computed_first_order = sensitivityAnalysis.getFirstOrderIndices()
        computed_total_order = sensitivityAnalysis.getTotalOrderIndices()

        # Exact first and total order
        exact_first_order = problem.getFirstOrderIndices()
        exact_total_order = problem.getTotalOrderIndices()

        # Check exact results
        Sexact = [1.0 / 30.0, 4.0 / 30, 9.0 / 30, 16.0 / 30]
        Texact = [1.0 / 30.0, 4.0 / 30, 9.0 / 30, 16.0 / 30]
        np.testing.assert_allclose(Sexact, exact_first_order)
        np.testing.assert_allclose(Texact, exact_total_order)

        # Compare with exact results
        print("Sample size : ", size)
        atol = 5.0 / np.sqrt(size)
        print("Absolute Tolerance = ", atol)
        # First order
        # Compute absolute error (the LRE cannot be computed,
        # because S can be zero)
        print("Computed first order = ", computed_first_order)
        print("Exact first order = ", exact_first_order)
        np.testing.assert_allclose(computed_first_order,
                                   exact_first_order,
                                   atol=atol)
        # Total order
        print("Computed total order = ", computed_total_order)
        print("Exact total order = ", exact_total_order)
        np.testing.assert_allclose(computed_total_order,
                                   exact_total_order,
                                   atol=atol)
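
The exact values asserted above follow from the additive Gaussian model: for Y = a_0 + sum_i a_{i+1} X_i with independent X_i ~ N(mu_i, sigma_i^2), each first order index equals a_{i+1}^2 sigma_i^2 divided by the total variance, and the total order indices coincide with the first order ones. A quick illustrative check (not part of the original test):

import numpy as np

a = np.array([1.0, 1.0, 1.0, 1.0])           # coefficients of the four inputs
sigma = np.array([1.0, 2.0, 3.0, 4.0])
contributions = (a * sigma) ** 2             # 1, 4, 9, 16
print(contributions / contributions.sum())   # [1/30, 4/30, 9/30, 16/30]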
Example #8
    def test_sobol(self):
        temp_path = tempfile.mkdtemp()
        path_mo = os.path.join(temp_path, 'IshigamiFunction.mo')
        path_fmu = os.path.join(temp_path, 'IshigamiFunction.fmu')
        with open(path_mo, "w") as mo:
            mo.write('model IshigamiFunction\n')
            mo.write('  final parameter Real a = 7;\n')
            mo.write('  final parameter Real b = 0.05;\n')
            mo.write('  input Real x1 = 1;\n')
            mo.write('  input Real x2 = 1;\n')
            mo.write('  input Real x3 = 1;\n')
            mo.write('  output Real f;\n')
            mo.write('  Real d;\n')
            mo.write('equation\n')
            mo.write('  f = sin(x1) + a * sin(x2)^2 + b * x3^4 * sin(x1);\n')
            mo.write('  der(d) = d + 2;\n')
            mo.write('end IshigamiFunction;\n')
        otfmi.mo2fmu(path_mo, path_fmu, fmuType="cs", verbose=True)

        # reimport fmu
        model_fmu = otfmi.FMUFunction(path_fmu)
        print(model_fmu, model_fmu.getInputDescription(),
              model_fmu.getOutputDescription())
        model_symbolic = ot.SymbolicFunction(
            ['x1', 'x2', 'x3'],
            ['sin(x1) + 7 * sin(x2)^2 + 0.05 * x3^4 * sin(x1)'])

        # Sobol' DOE
        X = ot.ComposedDistribution([ot.Uniform(-m.pi, m.pi)] * 3)
        N = 20
        x = ot.SobolIndicesExperiment(X, N).generate()
        size = len(x)

        # evaluate DOE
        t0 = time()
        process = psutil.Process(os.getpid())
        mem0 = process.memory_info().rss / 1000000
        for i in range(size):
            xi = x[i]
            yi = model_fmu(xi)
            yi_ref = model_symbolic(xi)
            assert m.fabs(yi[0] - yi_ref[0]) < 1e-8, "wrong value"
            print(i, xi, yi, process.memory_info().rss / 1000000, flush=True)
        t1 = time()
        mem1 = process.memory_info().rss / 1000000
        print("Speed=", size / (t1 - t0), "evals/s")
        print("Memory=", mem1 - mem0)
        shutil.rmtree(temp_path)
Example #9
def computeVariance(model, distribution, sobol_estimator, sampleSize):

    sobolexperiment = ot.SobolIndicesExperiment(distribution, int(sampleSize),
                                                False)
    inputDesign = sobolexperiment.generate()
    outputDesign = model(inputDesign)
    sensitivity_algorithm = sobol_estimator(inputDesign, outputDesign,
                                            int(sampleSize))

    # Retrieve the asymptotic distributions
    distFirstCol = sensitivity_algorithm.getFirstOrderAsymptoticDistribution()
    distTotalCol = sensitivity_algorithm.getTotalOrderAsymptoticDistribution()
    std_first_asymptotic = ot.Point(
        [d.getStandardDeviation()[0] for d in distFirstCol])
    std_total_asymptotic = ot.Point(
        [d.getStandardDeviation()[0] for d in distTotalCol])

    return std_first_asymptotic, std_total_asymptotic
Example #10
    def test_Ishigami(self):
        problem = otb.IshigamiSensitivity()
        print(problem)
        distribution = problem.getInputDistribution()
        model = problem.getFunction()

        # Create X/Y data
        ot.RandomGenerator.SetSeed(0)
        size = 10000
        inputDesign = ot.SobolIndicesExperiment(distribution, size,
                                                True).generate()
        outputDesign = model(inputDesign)

        # Compute first order indices using the Saltelli estimator
        sensitivityAnalysis = ot.SaltelliSensitivityAlgorithm(
            inputDesign, outputDesign, size)
        computed_first_order = sensitivityAnalysis.getFirstOrderIndices()
        computed_total_order = sensitivityAnalysis.getTotalOrderIndices()

        # Exact first and total order
        exact_first_order = problem.getFirstOrderIndices()
        exact_total_order = problem.getTotalOrderIndices()

        # Compare with exact results
        print("Sample size : ", size)
        atol = 10.0 / np.sqrt(size)
        print("Absolute Tolerance = ", atol)
        # First order
        # Compute absolute error (the LRE cannot be computed,
        # because S can be zero)
        print("Computed first order = ", computed_first_order)
        print("Exact first order = ", exact_first_order)
        np.testing.assert_allclose(computed_first_order,
                                   exact_first_order,
                                   atol=atol)
        # Total order
        print("Computed total order = ", computed_total_order)
        print("Exact total order = ", exact_total_order)
        np.testing.assert_allclose(computed_total_order,
                                   exact_total_order,
                                   atol=atol)
def computeVariance(model, distribution, sobol_estimator, sampleSize):
    """
    Compute the asymptotic standard deviation of the Sobol' indices estimator.

    Parameters
    ----------
    model : ot.Function
        The function.
    distribution : ot.Distribution
        The input distribution.
    sobol_estimator : ot.SobolSensitivityAlgorithm
        The estimator.
    sampleSize : int
        The sample size.

    Returns
    -------
    std_first_asymptotic : ot.Point(dimension)
        The componentwise standard deviation of the first order index.
    std_total_asymptotic : ot.Point(dimension)
        The componentwise standard deviation of the total order index.

    """

    sobolexperiment = ot.SobolIndicesExperiment(distribution, int(sampleSize), False)
    inputDesign = sobolexperiment.generate()
    outputDesign = model(inputDesign)
    sensitivity_algorithm = sobol_estimator(inputDesign, outputDesign, int(sampleSize))

    # Retrieve the asymptotic distributions
    distFirstCol = sensitivity_algorithm.getFirstOrderAsymptoticDistribution()
    distTotalCol = sensitivity_algorithm.getTotalOrderAsymptoticDistribution()
    std_first_asymptotic = ot.Point([d.getStandardDeviation()[0] for d in distFirstCol])
    std_total_asymptotic = ot.Point([d.getStandardDeviation()[0] for d in distTotalCol])

    return std_first_asymptotic, std_total_asymptotic
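
An illustrative call of this helper (the model, estimator and sample size are assumptions chosen for the sketch):

import openturns as ot

model = ot.SymbolicFunction(['x1', 'x2'], ['x1 + 2 * x2 + x1 * x2'])
distribution = ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * 2)
std_fo, std_to = computeVariance(model, distribution,
                                 ot.MartinezSensitivityAlgorithm, 1000)
print('First order std =', std_fo)
print('Total order std =', std_to)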
def CostSobol(MyModel, p, m, lower, upper, distribution, indexNumber,
              indexChoice, NSobol, MINMAX):
    '''
    Return the Sobol' index associated with the measure recovered from the
    canonical moment sequences.
    '''
    dim = len(lower)
    # We concatenate p per block of variable
    if len(m) == dim:
        pp = []
        t = 0
        for i in range(dim):
            pp.append(p[t:t + len(m[i]) + 1])
            t = t + len(m[i]) + 1
    else:
        raise ValueError('The size of the moment vector does not match the dimension')

    if indexChoice == 1:
        P = list(
            QD_Algorithm(
                Affine_Transformation(lower[indexNumber], upper[indexNumber],
                                      m[indexNumber]))) + list(pp[indexNumber])
        Position, Weight = Canonical_to_Position([lower[indexNumber]],
                                                 [upper[indexNumber]], P)

        distribution[indexNumber] = ot.Mixture(
            [ot.Dirac(Position[i]) for i in range(len(Position))], Weight)
        composedDistribution = ot.ComposedDistribution(distribution)
        ot.RandomGenerator.SetSeed(0)
        inputDesign = ot.SobolIndicesExperiment(composedDistribution, NSobol,
                                                True).generate()
        outputDesign = MyModel(inputDesign)

        sensitivityAnalysis = ot.SaltelliSensitivityAlgorithm(
            inputDesign, outputDesign, NSobol)
        firstOrder = sensitivityAnalysis.getFirstOrderIndices()
        return MINMAX * firstOrder[indexNumber]

    elif indexChoice == 0:
        t = 0
        P = [[]] * (dim - 1)
        Position = [[]] * (dim - 1)
        Weight = [[]] * (dim - 1)
        for i in range(dim):
            if i != indexNumber:
                P[t] = list(
                    QD_Algorithm(
                        Affine_Transformation(lower[i], upper[i],
                                              m[i]))) + list(pp[i])
                Position[t], Weight[t] = Canonical_to_Position([lower[i]],
                                                               [upper[i]],
                                                               P[t])
                distribution[i] = ot.Mixture([
                    ot.Dirac(Position[t][j]) for j in range(len(Position[t]))
                ], Weight[t])
                t += 1
        composedDistribution = ot.ComposedDistribution(distribution)
        ot.RandomGenerator.SetSeed(0)
        inputDesign = ot.SobolIndicesExperiment(composedDistribution, NSobol,
                                                True).generate()
        outputDesign = MyModel(inputDesign)

        sensitivityAnalysis = ot.SaltelliSensitivityAlgorithm(
            inputDesign, outputDesign, NSobol)
        totalOrder = sensitivityAnalysis.getTotalOrderIndices()
        return MINMAX * totalOrder[indexNumber]
Example #13
        print("Total order indices interval = ", interval_to_asymptotic)

# with experiment
sequence = ot.SobolSequence(input_dimension)
experiment = ot.LowDiscrepancyExperiment(
    sequence,
    ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * input_dimension), size)
sensitivity_algorithm = ot.SaltelliSensitivityAlgorithm(experiment, model)
print(sensitivity_algorithm.getFirstOrderIndices())

# multi variate model
model_aggregated = ot.SymbolicFunction(
    ['X1', 'X2', 'X3'],
    ['2*X1 + X2 - 3*X3 + 0.3*X1*X2', '-5*X1 + 4*X2 - 0.8*X2*X3 + 2*X3'])
distribution_aggregated = ot.ComposedDistribution([ot.Uniform()] * 3)
inputDesign = ot.SobolIndicesExperiment(distribution_aggregated,
                                        size).generate()
outputDesign = model_aggregated(inputDesign)
# Case 1 : Estimation of sensitivity using estimator and no bootstrap
for method in methods:
    sensitivity_algorithm = eval(
        'ot.' + method +
        "SensitivityAlgorithm(inputDesign, outputDesign, size)")
    print("Method of evaluation=", method)

    # Get first order indices
    fo = sensitivity_algorithm.getAggregatedFirstOrderIndices()
    print("Aggregated first order indices = ", fo)
    # Get total order indices
    to = sensitivity_algorithm.getAggregatedTotalOrderIndices()
    print("Aggregated total order indices = ", to)
Example #14
    alpha = (Zm - Zv)/L
    H = (Q/(Ks*B*alpha**0.5))**0.6
    Zc = H + Zv
    S = Zc - Zd
    return [S]

myFunction = ot.PythonFunction(4, 1, flooding) 
myParam = ot.GumbelAB(1013.0, 558.0)
Q = ot.ParametrizedDistribution(myParam)
Q = ot.TruncatedDistribution(Q, 0.0, ot.SpecFunc.MaxScalar)
Ks = ot.Normal(30.0, 7.5)
Ks = ot.TruncatedDistribution(Ks, 0.0, ot.SpecFunc.MaxScalar)
Zv = ot.Uniform(49.0, 51.0)
Zm = ot.Uniform(54.0, 56.0)
inputX = ot.ComposedDistribution([Q, Ks, Zv, Zm])
inputX.setDescription(["Q","Ks", "Zv", "Zm"])

size = 5000
computeSO = True
inputDesign = ot.SobolIndicesExperiment(inputX, size, computeSO).generate()
outputDesign = myFunction(inputDesign)
sensitivityAnalysis = ot.MartinezSensitivityAlgorithm(inputDesign, outputDesign, size)

graph = sensitivityAnalysis.draw()

fig = plt.figure(figsize=(8, 4))
plt.suptitle(graph.getTitle())
axis = fig.add_subplot(111)
axis.set_xlim(auto=True)
View(graph, figure=fig, axes=[axis], add_legend=True)
# Exact sensitivity indices
[muexact, vexact, sexact, stexact] = gsobolSAExact(a)

# Size of the base design of experiments used to estimate S and ST
sampleSize = 1000

# Number of repetitions of the experiment
nrepetitions = 500

# First order index estimates
sampleFirstMartinez = ot.Sample(nrepetitions, 3)

# Total order index estimates
sampleTotalMartinez = ot.Sample(nrepetitions, 3)
for i in range(nrepetitions):
    inputDesign = ot.SobolIndicesExperiment(distribution,
                                            sampleSize).generate()
    outputDesign = gsobol(inputDesign, a)
    sensitivity_algorithm = ot.SaltelliSensitivityAlgorithm(
        inputDesign, outputDesign, sampleSize)
    fo = sensitivity_algorithm.getFirstOrderIndices()
    to = sensitivity_algorithm.getTotalOrderIndices()
    for j in range(d):
        sampleFirstMartinez[i, j] = fo[j]
    for j in range(d):
        sampleTotalMartinez[i, j] = to[j]

fig = pl.figure(figsize=(12, 8))
for j in range(d):
    ax = fig.add_subplot(2, 3, 1 + j)
    graph = ot.HistogramFactory().build(sampleFirstMartinez[:, j]).drawPDF()
    graph.setXTitle("S%d" % (j + 1))
Example #16
inputDistribution = ot.Normal(5)
function = ot.SymbolicFunction(['x0', 'x1', 'x2', 'x3', 'x4'],
                               ['x0 + 4.0 * x1 ^ 2 + 3.0 * x2',
                                '-7.0 * x2 - 4.0 * x3 + x4'])

# %%
# Estimate the Sobol' indices
# ---------------------------

# %%
# We first create a design of experiments with `SobolIndicesExperiment`.

# %%
size = 1000
sie = ot.SobolIndicesExperiment(inputDistribution, size)
inputDesign = sie.generate()
input_names = inputDistribution.getDescription()
inputDesign.setDescription(input_names)
print("Sample size: ", inputDesign.getSize())

# %%
# We see that 7000 function evaluations are required to estimate the first order and total Sobol' indices.
# Then we evaluate the outputs corresponding to this design of experiments.

# %%
outputDesign = function(inputDesign)

# %%
# Then we estimate the Sobol' indices with the `SaltelliSensitivityAlgorithm`.
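# The corresponding code cell is not shown in this excerpt; a minimal sketch:

# %%
sensitivityAnalysis = ot.SaltelliSensitivityAlgorithm(inputDesign, outputDesign, size)
print(sensitivityAnalysis.getAggregatedFirstOrderIndices())
print(sensitivityAnalysis.getAggregatedTotalOrderIndices())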
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt
ot.Log.Show(ot.Log.NONE)

# %%
# To illustrate the usage of the method mentioned above, we define a set of X/Y data using the :ref:`Ishigami model <use-case-ishigami>`. This classical model is defined in a data class:
im = ishigami_function.IshigamiModel()

# %%
# Create X/Y data
# We get the input variables description:
input_names = im.distributionX.getDescription()

size = 100
inputDesign = ot.SobolIndicesExperiment(
    im.distributionX, size, True).generate()
outputDesign = im.model(inputDesign)

# %%
# Create a :class:`~openturns.CorrelationAnalysis` object to compute various estimates
# of the correlation between the inputs and the output.

corr_analysis = ot.CorrelationAnalysis(inputDesign, outputDesign)

# %%
# PCC coefficients
# ----------------

pcc_indices = corr_analysis.computePCC()
print(pcc_indices)
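
# %%
# Other estimates are available from the same object; a minimal sketch
# (assuming the computeSRC and computePRCC accessors of recent OpenTURNS
# versions):

src_indices = corr_analysis.computeSRC()
print(src_indices)
prcc_indices = corr_analysis.computePRCC()
print(prcc_indices)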
Example #18
#!/usr/bin/env python

from __future__ import print_function
import openturns as ot

size = 8
for dimension in [2, 3]:
    distribution = ot.Normal(dimension)
    for computeSecondOrder in [False, True]:
        experiment = ot.SobolIndicesExperiment(distribution, size,
                                               computeSecondOrder)
        print('experiment=', experiment)
        sample = experiment.generate()
        print('sample=', sample)
        print('size=', sample.getSize())
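
# A quick cross-check of the design size (a sketch): without second order
# blocks the generated sample holds size * (dimension + 2) points, which also
# matches the 7000 evaluations quoted in a later snippet for size=1000 and
# dimension 5.
for dimension in [2, 3]:
    print(dimension, '->', size * (dimension + 2))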
Example #19
    def __init__(self, n_samples, bounds, kind, dists=None, discrete=None):
        """Initialize the DOE generation.

        In case of :attr:`kind` is ``uniform``, :attr:`n_samples` is decimated
        in order to have the same number of points in all dimensions.

        If :attr:`kind` is ``discrete``, a joint distribution is built from a
        discrete uniform distribution and the continuous distributions.

        Another possibility is to set a list of PDFs to sample from. Thus one
        can do: `dists=['Uniform(15., 60.)', 'Normal(4035., 400.)']`. If not
        set, uniform distributions are used.

        :param int n_samples: number of samples.
        :param array_like bounds: Space's corners [[min, n dim], [max, n dim]]
        :param str kind: Sampling method; if a string, it can be one of
          ['halton', 'sobol', 'faure', '[o]lhs[c]', 'sobolscramble', 'uniform',
          'discrete'], otherwise it can be a list of openturns distributions.
        :param lst(str) dists: List of valid openturns distributions as string.
        :param int discrete: Position of the discrete variable.
        """
        self.n_samples = n_samples
        self.bounds = np.asarray(bounds)
        self.kind = kind
        self.dim = self.bounds.shape[1]

        self.scaler = preprocessing.MinMaxScaler()
        self.scaler.fit(self.bounds)

        if dists is None:
            dists = [ot.Uniform(float(self.bounds[0][i]),
                                float(self.bounds[1][i]))
                     for i in range(self.dim)]
        else:
            dists = bat.space.dists_to_ot(dists)

        if discrete is not None:
            # Creating uniform discrete distribution for OT
            disc_list = [[i] for i in range(int(self.bounds[0, discrete]),
                                            int(self.bounds[1, discrete] + 1))]
            disc_dist = ot.UserDefined(disc_list)

            dists.pop(discrete)
            dists.insert(discrete, disc_dist)

        # Joint distribution
        self.distribution = ot.ComposedDistribution(dists)

        if self.kind == 'halton':
            self.sequence_type = ot.LowDiscrepancyExperiment(ot.HaltonSequence(),
                                                             self.distribution,
                                                             self.n_samples)
        elif self.kind == 'sobol':
            self.sequence_type = ot.LowDiscrepancyExperiment(ot.SobolSequence(),
                                                             self.distribution,
                                                             self.n_samples)
        elif self.kind == 'faure':
            self.sequence_type = ot.LowDiscrepancyExperiment(ot.FaureSequence(),
                                                             self.distribution,
                                                             self.n_samples)
        elif (self.kind == 'lhs') or (self.kind == 'lhsc'):
            self.sequence_type = ot.LHSExperiment(self.distribution, self.n_samples)
        elif self.kind == 'olhs':
            lhs = ot.LHSExperiment(self.distribution, self.n_samples)
            self.sequence_type = ot.SimulatedAnnealingLHS(lhs, ot.GeometricProfile(),
                                                          ot.SpaceFillingC2())
        elif self.kind == 'saltelli':
            # Only relevant for computation of Sobol' indices
            size = self.n_samples // (2 * self.dim + 2)  # N(2*dim + 2)
            self.sequence_type = ot.SobolIndicesExperiment(self.distribution,
                                                           size, True).generate()
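
# Standalone sketch of the 'saltelli' branch above (the numbers are
# illustrative; the surrounding class is not reproduced here):
import openturns as ot

n_samples, dim = 800, 3
distribution = ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dim)
size = n_samples // (2 * dim + 2)  # block size N such that N * (2*dim + 2) <= n_samples
design = ot.SobolIndicesExperiment(distribution, size, True).generate()
print(design.getSize())  # expected N * (2*dim + 2) points, here 100 * 8 = 800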
Example #20
    def sobol(self):
        """Compute Sobol' indices.

        It returns the second, first and total order indices of Sobol'.
        Two methods are possible for the indices:

        - `sobol`
        - `FAST`

        .. warning:: The second order indices are only available with the sobol
          method. Also, when there is no surrogate (ensemble mode), FAST is not
          available and the DoE must have been generated with `saltelli`.

        And two types of computation are available for the global indices:

        - `block`
        - `aggregated`

        If *aggregated*, *map* indices are computed. In case of a scalar value,
        all types return the same values. *block* indices are written
        within `sensitivity.dat` and aggregated indices within
        `sensitivity_aggregated.dat`.

        Finally, it calls :func:`error_pod` in order to compare the indices
        with their analytical values.

        :return: Sobol' indices.
        :rtype: array_like.
        """
        indices = [[], [], []]
        aggregated = [[], [], []]
        indices_conf = [[], []]

        if self.type_indices == 'block':
            sobol_model = self.int_func
            sobol_len = 1
        else:
            sobol_model = self.func
            sobol_len = self.output_len

        if self.method_sobol == 'sobol':
            self.logger.info("\n----- Sobol' indices -----")

            if self.surrogate is not None:
                size = self.points_sample
                input_design = ot.SobolIndicesExperiment(self.distribution,
                                                         size, True).generate()
                output_design = sobol_model(input_design)
                self.logger.info("Created {} samples for Sobol'"
                                 .format(len(output_design)))
            else:
                input_design = self.space
                output_design = self.output
                size = len(self.space) // (2 * self.p_len + 2)
            # Martinez, Saltelli, MauntzKucherenko, Jansen
            ot.ResourceMap.SetAsBool('SobolIndicesAlgorithm-DefaultUseAsymptoticDistribution', True)
            sobol = ot.SaltelliSensitivityAlgorithm(input_design,
                                                    output_design, size)

            for i in range(sobol_len):
                try:
                    indices[0].append(np.array(sobol.getSecondOrderIndices(i)))
                except TypeError:
                    indices[0].append(np.zeros((self.p_len, self.p_len)))
            self.logger.debug("-> Second order:\n{}\n".format(indices[0]))

        elif self.method_sobol == 'FAST':
            self.logger.info("\n----- FAST indices -----")
            if self.output_len > 1:
                wrap_fun = sobol_model
            else:
                def wrap_fun(x):
                    return [sobol_model(x)]

            fast_model = ot.PythonFunction(self.p_len, self.output_len, wrap_fun)
            sobol = ot.FAST(ot.Function(fast_model),
                            self.distribution, self.points_sample)
            output_design = sobol_model(self.sample)
            self.logger.warning("No Second order indices with FAST")

        # try block used to handle boundary conditions with fixed values
        for i in range(sobol_len):
            try:
                indices[1].append(np.array(sobol.getFirstOrderIndices(i)))
            except TypeError:
                indices[1].append(np.zeros(self.p_len))
            try:
                indices[2].append(np.array(sobol.getTotalOrderIndices(i)))
            except TypeError:
                indices[2].append(np.zeros(self.p_len))

        self.logger.debug("-> First order:\n{}\n"
                          "-> Total:\n{}\n"
                          .format(*indices[1:]))

        # Write Sobol' indices to file: block or map
        if self.fname is not None:
            i1 = np.reshape(indices[1], (sobol_len, self.p_len))
            i2 = np.reshape(indices[2], (sobol_len, self.p_len))
            data = np.append(i1, i2, axis=1)

            names = ['S_{}'.format(p) for p in self.plabels]
            names += ['S_T_{}'.format(p) for p in self.plabels]

            if (self.output_len > 1) and (self.type_indices != 'block'):
                names = ['x'] + names
                data = np.append(np.reshape(self.xdata, (sobol_len, 1)), data, axis=1)
            sizes = [1] * len(names)

            self.io.write(os.path.join(self.fname, 'sensitivity.json'), data, names, sizes)
        else:
            self.logger.debug("No output folder to write indices in")

        # Aggregated Indices
        if self.type_indices == 'aggregated':
            self.logger.info("\n----- Aggregated Sensitivity Indices -----")

            output_var = output_design.var(axis=0)
            sum_var_indices = [np.zeros((self.p_len, self.p_len)),
                               np.zeros((self.p_len)), np.zeros((self.p_len))]

            # Compute manually for FAST and second order, otherwise OT
            if self.method_sobol == 'FAST':
                agg_range = [0, 1, 2]
            else:
                agg_range = [0]
            for i, j in itertools.product(range(self.output_len), agg_range):
                try:
                    indices[j][i] = np.nan_to_num(indices[j][i])
                    sum_var_indices[j] += float(output_var[i]) * indices[j][i]
                except IndexError:
                    sum_var_indices[j] = np.inf
            sum_var = np.sum(output_var)
            for i in range(3):
                aggregated[i] = sum_var_indices[i] / sum_var

            if self.method_sobol != 'FAST':
                aggregated[1] = np.array(sobol.getAggregatedFirstOrderIndices())
                aggregated[2] = np.array(sobol.getAggregatedTotalOrderIndices())
                indices_conf[0] = sobol.getFirstOrderIndicesInterval()
                indices_conf[1] = sobol.getTotalOrderIndicesInterval()

                self.logger.info("-> First order confidence:\n{}\n"
                                 "-> Total order confidence:\n{}\n"
                                 .format(*indices_conf))

            self.logger.info("Aggregated_indices:\n"
                             "-> Second order:\n{}\n"
                             "-> First order:\n{}\n"
                             "-> Total order:\n{}\n"
                             .format(*aggregated))

            # Write aggregated indices to file
            if self.fname is not None:
                i1 = np.array(aggregated[1])
                i2 = np.array(aggregated[2])
                if self.method_sobol != 'FAST':
                    i1_min = np.array(indices_conf[0].getLowerBound())
                    i1_max = np.array(indices_conf[0].getUpperBound())
                    i2_min = np.array(indices_conf[1].getLowerBound())
                    i2_max = np.array(indices_conf[1].getUpperBound())

                    # layout: [S_min_P1, S_min_P2, ..., S_P1, S_P2, ...]
                    data = np.array([i1_min, i1, i1_max, i2_min, i2, i2_max]).flatten()

                    names = [i + str(p) for i, p in
                             itertools.product(['S_min_', 'S_', 'S_max_',
                                                'S_T_min_', 'S_T_', 'S_T_max_'],
                                               self.plabels)]

                    conf = [(i1_max - i1_min) / 2, (i2_max - i2_min) / 2]
                else:
                    conf = None
                    names = [i + str(p) for i, p in
                             itertools.product(['S_', 'S_T_'], self.plabels)]
                    data = np.append(i1, i2)

                self.io.write(os.path.join(self.fname, 'sensitivity_aggregated.json'), data, names)
            else:
                self.logger.debug("No output folder to write aggregated indices in")

            full_indices = [aggregated[1], aggregated[2], indices[1], indices[2]]
        else:
            full_indices = [indices[1][0], indices[2][0]]
            aggregated = [indices[0][0], indices[1][0], indices[2][0]]
            conf = None
            self.xdata = None

        # Plot
        if self.fname:
            path = os.path.join(self.fname, 'sensitivity.pdf')
            plabels = [re.sub(r'(_)(.*)', r'\1{\2}', label)
                       for label in self.plabels]
            visualization.sensitivity_indices(full_indices, plabels=plabels,
                                              conf=conf, xdata=self.xdata,
                                              fname=path)
            path = os.path.join(self.fname, 'sensitivity-polar.pdf')
            visualization.sensitivity_indices(full_indices, plabels=plabels,
                                              conf=conf, polar=True,
                                              xdata=self.xdata, fname=path)
            if self.mesh_kwargs.get('fname'):
                path = os.path.join(self.fname, '1st_order_Sobol_map.pdf')
                visualization.mesh_2D(var=full_indices[2], flabels=plabels,
                                      output_path=path, **self.mesh_kwargs)
                path = os.path.join(self.fname, 'Total_order_Sobol_map.pdf')
                visualization.mesh_2D(var=full_indices[3], flabels=plabels,
                                      output_path=path, **self.mesh_kwargs)

        # Compute error of the POD with a known function
        if (self.type_indices in ['aggregated', 'block'])\
                and (self.test) and (self.surrogate):
            self.error_model(aggregated, self.test)

        return aggregated
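
As a standalone illustration of the FAST route mentioned in the docstring above, a minimal sketch on a toy function (the model, distribution and sample size are assumptions chosen for the example):

import openturns as ot

toy_model = ot.SymbolicFunction(['x1', 'x2'], ['x1 + 0.5 * x2 ^ 2'])
toy_distribution = ot.ComposedDistribution([ot.Uniform(-1.0, 1.0)] * 2)
fast = ot.FAST(toy_model, toy_distribution, 400)
print(fast.getFirstOrderIndices())
print(fast.getTotalOrderIndices())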
Example #21
Q = ot.ParametrizedDistribution(myParamQ)
otLOW = ot.TruncatedDistribution.LOWER
Q = ot.TruncatedDistribution(Q, 0, otLOW)
Ks = ot.Normal(30.0, 7.5)
Ks = ot.TruncatedDistribution(Ks, 0, otLOW)
Zv = ot.Uniform(49.0, 51.0)
Zm = ot.Uniform(54.0, 56.0)

# 4. Create the joint distribution function,
#    the output and the event.
X = ot.ComposedDistribution([Q, Ks, Zv, Zm])
Y = ot.RandomVector(g, ot.RandomVector(X))

# Works
size = 100000
inputDesign = ot.SobolIndicesExperiment(X, size).generate()
outputDesign = g(inputDesign)
sensitivityAnalysis = ot.SaltelliSensitivityAlgorithm(inputDesign,
                                                      outputDesign, size)
View(sensitivityAnalysis.draw())  # OK

dist_fo = sensitivityAnalysis.getFirstOrderIndicesDistribution()
dist_to = sensitivityAnalysis.getTotalOrderIndicesDistribution()

alpha = 0.1
input_dimension = X.getDimension()
for i in range(input_dimension):
    dist_fo_i = dist_fo.getMarginal(i)
    dist_to_i = dist_to.getMarginal(i)
    fo_ci = dist_fo_i.computeBilateralConfidenceInterval(1 - alpha)
    to_ci = dist_to_i.computeBilateralConfidenceInterval(1 - alpha)
graph = ot.HistogramFactory().build(sampleY).drawPDF()
view = viewer.View(graph)

# %%
# We see that the distribution of the output has two modes.

# %%
# Estimate the Sobol' indices
# ---------------------------

# %%
# We first create a design of experiments with the `SobolIndicesExperiment`. Since we are not interested in second order indices for the moment, we use the default value of the third argument (we will come back to this topic later).

# %%
size = 1000
sie = ot.SobolIndicesExperiment(im.distributionX, size)
inputDesign = sie.generate()
input_names = im.distributionX.getDescription()
inputDesign.setDescription(input_names)
inputDesign.getSize()

# %%
# We see that 5000 function evaluations are required to estimate the first order and total Sobol' indices.

# %%
# Then we evaluate the outputs corresponding to this design of experiments.

# %%
outputDesign = im.model(inputDesign)

# %%
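# The estimation step that would normally follow is not shown in this excerpt;
# a minimal sketch:
sensitivityAnalysis = ot.SaltelliSensitivityAlgorithm(inputDesign, outputDesign, size)
print(sensitivityAnalysis.getFirstOrderIndices())
print(sensitivityAnalysis.getTotalOrderIndices())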
        'sin(_pi*X1)+7*sin(_pi*X2)*sin(_pi*X2)+0.1*((_pi*X3)*(_pi*X3)*(_pi*X3)*(_pi*X3))*sin(_pi*X1)']

    model = ot.SymbolicFunction(['X1', 'X2', 'X3'], formula)

    distribution = ot.ComposedDistribution(
        [ot.Uniform(-1.0, 1.0)] * input_dimension)

    # Size of simulation
    size = 10000

    # Test with the various implementation methods
    methods = ["Saltelli", "Jansen", "MauntzKucherenko", "Martinez"]

    # Use of Generate method to build input/output designs
    computeSO = True
    inputDesign = ot.SobolIndicesExperiment(distribution, size, computeSO).generate()
    outputDesign = model(inputDesign)
    # Case 1 : Estimation of sensitivity using estimator and no bootstrap
    for method in methods:
        sensitivity_algorithm = eval(
            'ot.' + method + "SensitivityAlgorithm(inputDesign, outputDesign, size)")
        # Get first order indices
        fo = sensitivity_algorithm.getFirstOrderIndices()
        print("Method of evaluation=", method)
        print("First order indices = ", fo)
        # Get total order indices
        to = sensitivity_algorithm.getTotalOrderIndices()
        print("Total order indices = ", to)

    # Case 2 : Estimation of sensitivity using Martinez estimator and bootstrap
    nr_bootstrap = 100
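
    # A minimal sketch of how Case 2 could continue (the original continuation
    # is not shown in this excerpt; the confidence level is an assumption):
    sensitivity_algorithm = ot.MartinezSensitivityAlgorithm(inputDesign,
                                                            outputDesign, size)
    sensitivity_algorithm.setBootstrapSize(nr_bootstrap)
    sensitivity_algorithm.setConfidenceLevel(0.95)
    print("First order indices interval = ",
          sensitivity_algorithm.getFirstOrderIndicesInterval())
    print("Total order indices interval = ",
          sensitivity_algorithm.getTotalOrderIndicesInterval())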