Example #1
import openturns as ot
import numpy as np


def BuildDistribution(X):
    #return ot.FunctionalChaosAlgorithm.BuildDistribution(X)
    input_dimension = X.shape[1]
    marginals = []
    for j in range(input_dimension):
        marginals.append(ot.HistogramFactory().build(X[:,j].reshape(-1, 1)))
    # Pairwise Spearman tests for independence between all input pairs
    isIndependent = True
    for j in range(input_dimension):
        marginalJ = X[:,j].reshape(-1, 1)
        for i in range(j + 1, input_dimension):
            marginalI = X[:,i].reshape(-1, 1)
            testResult = ot.HypothesisTest.Spearman(marginalI, marginalJ)
            isIndependent = isIndependent and testResult.getBinaryQualityMeasure()
    copula = ot.IndependentCopula(input_dimension)
    if not isIndependent:
        copula = ot.NormalCopulaFactory().build(X)
    distribution = ot.ComposedDistribution(marginals, copula)
    return distribution
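
# A minimal usage sketch (hypothetical data, not part of the original snippet):
# X is assumed to be a 2-D NumPy array, as the reshape calls above imply.
rng = np.random.RandomState(42)
X = rng.normal(size=(500, 2))
X[:, 1] += 0.8 * X[:, 0]  # induce rank correlation so independence is rejected
print(BuildDistribution(X))  # ComposedDistribution with Histogram marginals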
Example #2
import math
from itertools import product
from typing import Callable, List, Tuple, Union

import numpy as np
import openturns as ot

# cp_to_p and joint_cdf_to_pdf are helper functions defined elsewhere in the
# source module (cumulative-to-point probabilities and joint CDF to PDF).


def multivariate_marginal_to_univariate_joint_cdf(  # noqa: C901
    marginal_cdfs_p: Union[List[Union[List[float], np.ndarray,
                                      ot.DistributionImplementation]],
                           np.ndarray],
    marginal_cdfs_v: Union[List[Union[List[float], np.ndarray]],
                           np.ndarray] = None,
    a: float = 0,
    b: float = 1,
    copula: ot.CopulaImplementation = None,
    agg_function: Callable[[np.ndarray], np.ndarray] = None,
    simplify: bool = True,
    n_draws: int = 100,
    empirical: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
    """Calculate univariate joint CDF given a list of multivariate marginal CDFs and a copula,
    returning both the cumulative probabilities and the aggregated outcome of the random variables.

    :param marginal_cdfs_p: Each marginal CDF is a list (or 2-D array) with cumulative probabilities up to cp=1.
    If a cdf does not go up to cp=1 and there are few cdfs (low dimension), we can still evaluate possible combinations
    for each marginal cp given. That is, the remaining probability is attributed to some higher (but unknown) outcome.
    However, the empirical method can't be used.
    :param marginal_cdfs_v: Values of possible outcomes for each random variable, i.e. the bins of the marginal CDFs.
    If just one set of bins is given, we assume the CDFs share the same set of bins.
    If no bins are specified (the default), we assume the CDFs share a set of equal-sized bins between a and b.

    "All bins are equal, but some bins are more equal than others." (because they have a higher probability)

    :param a: The lowest outcome (0 by default, and ignored if CDF values are given explicitly)
    :param b: The highest outcome (1 by default, and ignored if CDF values are given explicitly)
    :param copula: The default copula is the independence copula (i.e. we assume independent random variables).
    :param agg_function: The default aggregation function is to take the sum of the outcomes of the random variables.
    :param simplify: Simplify the resulting cdf by removing possible outcomes with zero probability (True by default)
    :param n_draws: Number of draws (sample size) to compute the empirical CDF when aggregating >3 random variables.
    :param empirical: Compute the empirical CDF regardless of the number of random variables (default is False)
    """

    dim = len(marginal_cdfs_p)
    n_outcomes = 99  # TODO: refactor to avoid having to set this above our threshold for computing exact probabilities

    # Set up marginal distributions
    empirical_method_possible = True
    if isinstance(marginal_cdfs_p[0], ot.DistributionImplementation):
        marginals = marginal_cdfs_p
        shared_bins = False
        empirical = True
    else:
        # Set up marginal cdf values
        n_outcomes = len(marginal_cdfs_p[0])
        shared_bins = True
        if marginal_cdfs_v is None:
            values = np.linspace(a, b, n_outcomes)
        elif isinstance(marginal_cdfs_v[0], (list, np.ndarray)):
            shared_bins = False
            values = marginal_cdfs_v
        else:
            values = marginal_cdfs_v

        marginals = []
        for i in range(dim):
            marginal_cdf = marginal_cdfs_p[i]
            if shared_bins is True:
                values_for_cdf = values
            else:
                values_for_cdf = marginal_cdfs_v[i]
            if not math.isclose(marginal_cdf[-1], 1, rel_tol=1e-7):
                empirical_method_possible = False
                # We can assume some higher outcome exists with cp=1,
                # so add one carrying the remaining probability (+1 suffices)
                values_for_cdf = np.append(values_for_cdf, values_for_cdf[-1] + 1)
                marginal_pdf = np.clip(
                    np.concatenate((
                        [marginal_cdf[0]],
                        np.diff(marginal_cdf),
                        [1.0 - marginal_cdf[-1]],
                    )),
                    0,
                    1,
                )
            else:
                marginal_pdf = np.clip(cp_to_p(marginal_cdf), 0, 1)
            marginals.append(
                ot.UserDefined([[v] for v in values_for_cdf], marginal_pdf))

    # If not specified, pick the independent copula as a default (i.e. assume independent random variables)
    if copula is None:
        copula = ot.IndependentCopula(dim)

    # If not specified, pick the sum function as a default for joining values
    if agg_function is None:
        agg_function = np.sum

    # Evaluate exact probabilities only for small bivariate and tri-variate joint distributions
    if dim <= 3 and n_outcomes <= 10 and empirical is False:

        # Determine joint distribution (too slow for high dimensions)
        d = ot.ComposedDistribution(marginals, copula)

        # Compute acceptable margin to prevent floating point errors (we'll evaluate a little on the right side of each marginal point)
        if shared_bins is True:
            smallest_marginal_point_distance = np.diff(values).min() if n_outcomes > 1 else 1
        elif dim > 1:
            smallest_marginal_point_distance = np.diff(values, axis=1).min() if n_outcomes > 1 else 1
        else:
            # With just 1 point, an arbitrary positive distance suffices (e.g. 1)
            smallest_marginal_point_distance = 1
        margin = smallest_marginal_point_distance / 2

        # Construct an n-dimensional matrix with all possible points (i.e. combinations of outcomes of our random variables)
        if shared_bins is True:
            marginal_points = list(product(values, repeat=dim))
            shape = (n_outcomes, ) * dim

            # Marginal points for the cdf evaluation are slightly higher to ensure we are on the right side of the discrete jump in cumulative probability
            marginal_points_for_cdf_evaluation = list(
                product([v + margin for v in values], repeat=dim))
        else:
            marginal_points = list(product(*marginal_cdfs_v))
            shape = [len(m) for m in marginal_cdfs_v]

            # Marginal points for the cdf evaluation, shifted slightly to the
            # right of each discrete jump in cumulative probability
            marginal_points_for_cdf_evaluation = list(
                product(*[np.asarray(v) + margin for v in marginal_cdfs_v]))

        # Evaluate exact probabilities at each point (too slow for high dimensions)
        joint_multivariate_cdf = np.reshape(
            d.computeCDF(marginal_points_for_cdf_evaluation), shape)
        joint_multivariate_pdf = joint_cdf_to_pdf(joint_multivariate_cdf)

        # Sort the probabilities ascending, keeping track of the corresponding values
        p, v = zip(*sorted(
            zip(joint_multivariate_pdf.flatten(),
                agg_function(marginal_points, 1))))

        # Calculate total probability of each unique value (by adding probability of cases that yield the same value)
        cdf_v = np.unique(v)
        pdf_p = np.array(
            [sum(np.array(p)[np.where(v == i)[0]]) for i in cdf_v])
    elif empirical_method_possible is True:
        # Otherwise, compute the empirical cdf from a sample generated directly
        # from the copula (much faster than sampling from the joint cdf)
        uniform_points = np.array(copula.getSample(n_draws))
        aggregated_points = np.zeros(n_draws)
        for i, point in enumerate(uniform_points):
            aggregated_points[i] = agg_function(
                list(
                    marginal_cdf.computeQuantile(marginal_cdf_p)[0]
                    for marginal_cdf_p, marginal_cdf in zip(point, marginals)))
        empirical_cdf = ot.UserDefined([[v] for v in aggregated_points])
        pdf_p = np.array(empirical_cdf.getP())
        cdf_v = np.array(empirical_cdf.getX()).flatten()
    else:
        raise ValueError(
            "Empirical method not possible given incomplete marginal CDF. Make sure all CDFs go up to 1."
        )

    # Simplify resulting pdf
    if simplify is True:
        cdf_v = cdf_v[np.nonzero(pdf_p)]
        pdf_p = pdf_p[np.nonzero(pdf_p)]

    # Return the univariate joint cumulative probability function
    cdf_p = pdf_p.cumsum()

    return cdf_p, cdf_v
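
# A minimal usage sketch (hypothetical numbers): two variables with three
# shared bins between 0 and 1 and the default independent copula, summed.
# Note that cp_to_p and joint_cdf_to_pdf are helpers from the source module.
cdf_p, cdf_v = multivariate_marginal_to_univariate_joint_cdf(
    [[0.2, 0.6, 1.0], [0.5, 0.8, 1.0]])
print(cdf_v)  # possible aggregated outcomes: 0.0, 0.5, 1.0, 1.5, 2.0
print(cdf_p)  # cumulative probabilities, ending at 1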
Example #3
    try:
        import pyAgrum.lib.notebook as gnb
        gnb.showInference(model, evs=evs, size=size)
    except ImportError:
        pass

import openturns as ot

# **Probabilistic model**

# Marginal distributions
Torque = ot.LogNormal(0.0, 0.25)
Angle = ot.TruncatedNormal(0.0, 2.0, -8.0, 8.0)
Joint = ot.Uniform(1.8, 2.2)

# Dependence
rho = 0.5
TorqueAngleCopula = ot.NormalCopula(ot.CorrelationMatrix(2, [1.0, rho, rho, 1.0]))
copula = ot.ComposedCopula([TorqueAngleCopula, ot.IndependentCopula(1)])

# Joint distribution if needed
TorqueAngle = ot.ComposedDistribution([Torque, Angle], TorqueAngleCopula)
fullDistribution = ot.ComposedDistribution([Torque, Angle, Joint], copula)

# Leakage angle (rad)
angleMax = 5.0

# Leakage joint (mm)
jointMin = 2.0
jointSpread = 0.1

# Vibration torque (kN.m)
torqueSpread = 2.0
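
# A minimal sketch (not in the original script): sample the joint model and
# query a marginal, e.g. the probability that the angle exceeds angleMax.
sample = fullDistribution.getSample(1000)  # (Torque, Angle, Joint) realizations
print(Angle.computeComplementaryCDF(angleMax))  # P(Angle > angleMax)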
Example #4
import openturns as ot


def IC_marginalKS(data, marginals):
    # Second model: marginal KS + independent copula
    print("Indep. copula")
    dimension = data.getDimension()
    model = ot.ComposedDistribution(marginals, ot.IndependentCopula(dimension))
    return model
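
# A minimal usage sketch (hypothetical data): fit the marginals elsewhere
# (the "marginal KS" selection step) and pass them in with the data.
data = ot.Normal(2).getSample(100)
marginals = [ot.NormalFactory().build(data.getMarginal(i))
             for i in range(data.getDimension())]
model = IC_marginalKS(data, marginals)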
Example #5
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot
import math as m

# ot.Log.Show(ot.Log.ALL)

coll = []

# case 1: no transformation
coll.append([ot.Normal(), ot.Normal()])

# case 2: same copula
left = ot.ComposedDistribution([ot.Normal(), ot.Gumbel()],
                               ot.IndependentCopula(2))
right = ot.ComposedDistribution([ot.Triangular()] * 2, ot.IndependentCopula(2))
coll.append([left, right])

# case 3: same standard space
left = ot.ComposedDistribution([ot.Normal(), ot.Gumbel()],
                               ot.IndependentCopula(2))
right = ot.ComposedDistribution([ot.Triangular()] * 2, ot.GumbelCopula())
coll.append([left, right])

# TODO case 4: different standard space

for left, right in coll:
    transformation = ot.DistributionTransformation(left, right)
    print('left=', left)
    print('right=', right)
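
# A minimal sketch (not in the original test): a DistributionTransformation is
# a Function mapping realizations of `left` to realizations of `right`; here it
# is applied to a point, using the last (left, right) pair from the loop.
T = ot.DistributionTransformation(left, right)
print(T([0.5, 0.5]))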
Example #6
#!/usr/bin/env python
# coding:utf-8
"""Sample distribution.

Generate sampling using OpenTURNS.

"""
import openturns as ot
import numpy as np
import json

n_samples = 100
dists = [ot.Uniform(20., 40.), ot.Normal(2345., 400.)]

settings_path = './'

with open(settings_path + 'settings.json', 'r') as f:
    settings = json.load(f)

distribution = ot.ComposedDistribution(dists, ot.IndependentCopula(len(dists)))
experiment = ot.LHSExperiment(distribution, n_samples, True, True)  # alwaysShuffle, randomShift
sample = np.array(experiment.generate()).tolist()

settings['space']['sampling'] = sample

with open(settings_path + 'settings.json', 'w') as f:
    json.dump(settings, f, indent=4)
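
# A minimal sketch (assumption: a repeatable design is wanted): since the LHS
# experiment shuffles, fixing the random generator seed before generate()
# makes the sampling reproducible across runs.
ot.RandomGenerator.SetSeed(0)
sample = np.array(experiment.generate()).tolist()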
Example #7
    input = ot.Description(3)
    input[0] = 'a'
    input[1] = 'b'
    input[2] = 'c'
    formulas = ot.Description(3)
    formulas[0] = 'a+b+c'
    formulas[1] = 'a-b*c'
    formulas[2] = '(a+2*b^2+3*c^3)/6'
    analytical = ot.SymbolicFunction(input, formulas)
    analytical.setName('analytical')
    analytical.setOutputDescription(['z1', 'z2', 'z3'])
    myStudy.add('analytical', analytical)

    # Create a TaylorExpansionMoments algorithm
    antecedent = ot.RandomVector(
        ot.IndependentCopula(analytical.getInputDimension()))
    antecedent.setName('antecedent')
    composite = ot.CompositeRandomVector(analytical, antecedent)
    composite.setName('composite')
    taylorExpansionsMoments = ot.TaylorExpansionMoments(composite)
    taylorExpansionsMoments.setName('taylorExpansionsMoments')
    taylorExpansionsMoments.getMeanFirstOrder()
    taylorExpansionsMoments.getMeanSecondOrder()
    taylorExpansionsMoments.getCovariance()

    myStudy.add('taylorExpansionsMoments', taylorExpansionsMoments)

    # Create a FORMResult
    input2 = ot.Description(2)
    input2[0] = 'x'
    input2[1] = 'y'
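
# A minimal standalone sketch (not part of the saved Study above) of the
# TaylorExpansionMoments usage shown earlier: moments of f(X) estimated by
# Taylor expansion around the mean of X.
import openturns as ot

f = ot.SymbolicFunction(['a', 'b', 'c'], ['a+b+c'])
X = ot.RandomVector(ot.Normal(3))
Y = ot.CompositeRandomVector(f, X)
taylor = ot.TaylorExpansionMoments(Y)
print(taylor.getMeanFirstOrder())
print(taylor.getCovariance())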
Example #8
R_dist.setName('Yield strength')
R_dist.setDescription('R')

# Graphical output of the PDF
R_dist.drawPDF()

# Create a second marginal: 1D Normal distribution
F_dist = ot.Normal(75000, 5000)
F_dist.setName('Traction_load')
F_dist.setDescription('F')

# Graphical output of the PDF
F_dist.drawPDF()

# Create a copula: IndependentCopula (no correlation)
aCopula = ot.IndependentCopula(dim)
aCopula.setName('Independent copula')

# Instantiate one distribution object
myDistribution = ot.ComposedDistribution([R_dist, F_dist], aCopula)
myDistribution.setName('myDist')

# We create a 'usual' RandomVector from the Distribution
vect = ot.RandomVector(myDistribution)

# We create a composite random vector
G = ot.RandomVector(limitState, vect)

# We create an Event from this RandomVector
myEvent = ot.Event(G, ot.Less(), 0.0)
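
# A minimal sketch (not in the original snippet) of estimating P(G <= 0) from
# myEvent; in recent OpenTURNS versions the event would be built with
# ot.ThresholdEvent instead of the deprecated ot.Event.
algo = ot.ProbabilitySimulationAlgorithm(myEvent, ot.MonteCarloExperiment())
algo.setMaximumOuterSampling(10000)
algo.run()
print(algo.getResult().getProbabilityEstimate())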
Example #9
formula_fake_var = 'x1'
formula_y0 = 'cos(0.5*x1) + sin(x2)'
formula_y1 = 'cos(0.5*x1) + sin(x2) + x3'
symbolicModel = persalys.SymbolicPhysicalModel('symbolicModel', [x1, x2, x3], [fake_var, y0, fake_y0, y1], [
                                         formula_fake_var, formula_y0, formula_y0, formula_y1])

myStudy.add(symbolicModel)

# python model ##
code = 'from math import cos, sin, sqrt\n\ndef _exec(x1, x2, x3):\n    y0 = cos(0.5*x1) + sin(x2) + sqrt(x3)\n    return y0\n'
pythonModel = persalys.PythonPhysicalModel('pythonModel', [x1, x2, x3], [y0], code)
myStudy.add(pythonModel)

filename = 'data.csv'
cDist = ot.ComposedDistribution([ot.Normal(), ot.Gumbel(), ot.Normal(), ot.Uniform()],
                                ot.ComposedCopula([ot.IndependentCopula(2), ot.GumbelCopula()]))
sample = cDist.getSample(200)
sample.exportToCSVFile(filename, ' ')

# Designs of Experiment ##

# fixed design ##
ot.RandomGenerator.SetSeed(0)
fixedDesign = persalys.FixedDesignOfExperiment('fixedDesign', symbolicModel)
inputSample = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0., 10.), ot.Uniform(0., 10.)]), 10).generate()
inputSample.stack(ot.Sample(10, [0.5]))
fixedDesign.setOriginalInputSample(inputSample)
fixedDesign.run()
myStudy.add(fixedDesign)

# grid ##
Example #10
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot
import math as m

# ot.Log.Show(ot.Log.ALL)

coll = []

# case 1: no transformation
coll.append([ot.Normal(), ot.Normal()])

# case 2: same copula
left = ot.ComposedDistribution([ot.Normal(), ot.Gumbel()],
                               ot.IndependentCopula(2))
right = ot.ComposedDistribution([ot.Triangular()] * 2, ot.IndependentCopula(2))
coll.append([left, right])

# case 3: same standard space
left = ot.ComposedDistribution([ot.Normal(), ot.Gumbel()],
                               ot.IndependentCopula(2))
right = ot.ComposedDistribution([ot.Triangular()] * 2, ot.GumbelCopula())
coll.append([left, right])

# TODO case 4: different standard space

for left, right in coll:
    transformation = ot.DistributionTransformation(left, right)
    print('left=', left)
    print('right=', right)
Example #11
# model 1 ##
formula_fake_var = 'x1+'
formula_y0 = 'cos(0.5*x1) + sin(x2)'
formula_y1 = 'cos(0.5*x1) + sin(x2) + x3'
model1 = persalys.SymbolicPhysicalModel(
    'model1', [x1, x2, x3], [fake_var, y0, fake_y0, y1],
    [formula_fake_var, formula_y0, formula_y0, formula_y1])

myStudy.add(model1)

# model 3 ##
filename = 'data.csv'
cDist = ot.ComposedDistribution(
    [ot.Normal(), ot.Gumbel(),
     ot.Normal(), ot.Uniform()],
    ot.ComposedCopula([ot.IndependentCopula(2),
                       ot.GumbelCopula()]))
sample = cDist.getSample(20)
sample.exportToCSVFile(filename, ' ')
model3 = persalys.DataModel('model3', 'data.csv', [0, 2, 3], [1],
                            ['x_0', 'x_2', 'x_3'], ['x_1'])
myStudy.add(model3)

# Design of Experiment ##

probaDesign = persalys.ProbabilisticDesignOfExperiment('probaDesign', model1,
                                                       20, "MONTE_CARLO")
probaDesign.run()
myStudy.add(probaDesign)

# 1- meta model1 ##
Example #12
# %%
showDot(dag.toDot())

# %%
# Learning parameters
# Bernstein copulas are used to learn the local conditional copulas associated with each node

# %%
m_list = []
lcc_list = []
for i in range(train.getDimension()):
    m_list.append(ot.UniformFactory().build(train.getMarginal(i)))
    indices = [i] + [int(n) for n in ndag.getParents(i)]
    dim_lcc = len(indices)
    if dim_lcc == 1:
        bernsteinCopula = ot.IndependentCopula(1)
    elif dim_lcc > 1:
        K = otagrum.ContinuousTTest.GetK(len(train), dim_lcc)
        bernsteinCopula = ot.EmpiricalBernsteinCopula(
            train.getMarginal(indices), K, False)
    lcc_list.append(bernsteinCopula)

# %%
# We can now create the learned CBN

# %%
lcbn = otagrum.ContinuousBayesianNetwork(ndag, m_list, lcc_list)  # Learned CBN

# %%
# And compare the mean loglikelihood between the true and the learned models
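
# %%
# A minimal sketch of that comparison (assumes a held-out sample `test` and
# the true model `cbn` from earlier in the notebook):
# print(cbn.computeLogPDF(test).computeMean())
# print(lcbn.computeLogPDF(test).computeMean())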
Example #13
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
if ot.IndependentCopula().__class__.__name__ == 'EmpiricalBernsteinCopula':
    sample = ot.Dirichlet([1.0, 2.0, 3.0]).getSample(100)
    copula = ot.EmpiricalBernsteinCopula(sample, 4)
elif ot.IndependentCopula().__class__.__name__ == 'ExtremeValueCopula':
    copula = ot.ExtremeValueCopula(ot.SymbolicFunction("t", "t^3/2-t/2+1"))
elif ot.IndependentCopula(
).__class__.__name__ == 'MaximumEntropyOrderStatisticsCopula':
    marginals = [ot.Beta(1.5, 3.2, 0.0, 1.0), ot.Beta(2.0, 4.3, 0.5, 1.2)]
    copula = ot.MaximumEntropyOrderStatisticsCopula(marginals)
elif ot.IndependentCopula().__class__.__name__ == 'NormalCopula':
    R = ot.CorrelationMatrix(2)
    R[1, 0] = 0.8
    copula = ot.NormalCopula(R)
elif ot.IndependentCopula().__class__.__name__ == 'SklarCopula':
    student = ot.Student(3.0, [1.0] * 2, [3.0] * 2, ot.CorrelationMatrix(2))
    copula = ot.SklarCopula(student)
else:
    copula = ot.IndependentCopula()
if copula.getDimension() == 1:
    copula = ot.IndependentCopula(2)
copula.setDescription(['$u_1$', '$u_2$'])
pdf_graph = copula.drawPDF()
cdf_graph = copula.drawCDF()
fig = plt.figure(figsize=(10, 4))
pdf_axis = fig.add_subplot(121)
cdf_axis = fig.add_subplot(122)
View(pdf_graph, figure=fig, axes=[pdf_axis], add_legend=False)
View(cdf_graph, figure=fig, axes=[cdf_axis], add_legend=False)
Example #14
# Statistical inference of the KL coefficients' distribution
kernel_smoothing = ot.KernelSmoothing(ot.Normal())
xi_marginal_distributions = ot.DistributionCollection([
    kernel_smoothing.build(xi[:, i][:, np.newaxis])
    for i in range(truncation_order)
])
try:
    xi_copula = ot.NormalCopulaFactory().build(xi)
except RuntimeError:
    print('ERR: The normal copula correlation matrix built from the given\n' +
          'Spearman correlation matrix is not positive definite.\n' +
          'This would require expert judgement on the significance of the\n' +
          'correlation coefficients (using e.g. a Spearman test).\n' +
          'Assuming an independent copula in the sequel...')
    xi_copula = ot.IndependentCopula(truncation_order)
xi_estimated_distribution = ot.ComposedDistribution(xi_marginal_distributions,
                                                    xi_copula)

# Matrix plot of the empirical KL coefficients & their estimated distribution
matrix_plot(xi,
            ot_distribution=xi_estimated_distribution,
            labels=[('$\\xi_{%d}$' % i) for i in range(truncation_order)])
pl.suptitle('Karhunen-Loeve coefficients ' +
            '(observations and estimated distribution)')
pl.savefig('2D_identification_KL_coefficients_joint_distribution.png')
pl.close()

# Plot the ten first observed sample paths reconstructed from the estimated
# random field and an adequation plot with respect to the original observed
# sample paths
Example #15
ndag = otagrum.NamedDAG(bn)
print("       size : ", ndag.getSize())
print("       desc : ", ndag.getDescription())
print("      nodes : ", ndag.getTopologicalOrder())
for nod in ndag.getTopologicalOrder():
    print(" parents(", nod, ") : ", ndag.getParents(nod))
    print("children(", nod, ") : ", ndag.getChildren(nod))

if False:
    marginals = [ot.Uniform(0.0, 1.0) for i in range(order.getSize())]
    copulas = list()
    for i in range(order.getSize()):
        d = 1 + ndag.getParents(i).getSize()
        print("i=", i, ", d=", d)
        if d == 1:
            copulas.append(ot.IndependentCopula(1))
        else:
            R = ot.CorrelationMatrix(d)
            for k in range(d):  # avoid shadowing the outer loop variable i
                for j in range(k):
                    R[k, j] = 0.5 / d
            copulas.append(
                ot.Student(5.0, [0.0] * d, [1.0] * d, R).getCopula())

    cbn = otagrum.ContinuousBayesianNetwork(ndag, marginals, copulas)
    print("cbn=", cbn)
    print("cbn pdf=", cbn.computePDF([0.5] * d))
    print("cbn realization=", cbn.getRealization())
    size = 300
    sampleLearn = cbn.getSample(size)
    sample = cbn.getSample(size)
Example #16
# %%

Tobs = np.array([4380, 1791, 1611, 1291, 6132, 5694, 5296, 4818, 4818, 4380])
fail = np.array([True] * 4 + [False] * 6)
x = ot.Sample(np.vstack((Tobs, fail)).T)

# %%
# Define a uniform prior distribution for :math:`\alpha` and a Gamma prior distribution for :math:`\beta`.
#

# %%

alpha_min, alpha_max = 0.5, 3.8
a_beta, b_beta = 2, 2e-4

priorCopula = ot.IndependentCopula(2)  # prior independence
priorMarginals = []  # prior marginals
priorMarginals.append(ot.Gamma(a_beta, b_beta))  # Gamma prior for beta
priorMarginals.append(ot.Uniform(alpha_min,
                                 alpha_max))  # uniform prior for alpha
prior = ot.ComposedDistribution(priorMarginals, priorCopula)
prior.setDescription(['beta', 'alpha'])

# %%
# We select prior means as the initial point of the Metropolis-Hastings algorithm.
#

# %%

initialState = [a_beta / b_beta, 0.5 * (alpha_min + alpha_max)]  # prior means
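
# %%
# A hedged sketch of the sampler setup (the Metropolis-Hastings API differs
# across OpenTURNS versions; names below follow the >= 1.19 interface, the
# proposal scale is an arbitrary assumption, and without a likelihood the
# chain would simply sample the prior):
# proposal = ot.Normal([0.0] * 2, [1000.0, 0.1], ot.CorrelationMatrix(2))
# rw = ot.RandomWalkMetropolisHastings(prior, initialState, proposal)
# posteriorSample = rw.getSample(1000)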
Example #17
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
if (ot.IndependentCopula().__class__.__name__ == 'SklarCopula'):
    myStudent = ot.Student(3.0, [1.0] * 2, [3.0] * 2, ot.CorrelationMatrix(2))
    copula = ot.SklarCopula(myStudent)
else:
    copula = ot.IndependentCopula()
if copula.getDimension() == 1:
    copula = ot.IndependentCopula(2)
copula.setDescription(['$u_1$', '$u_2$'])
pdf_graph = copula.drawPDF()
cdf_graph = copula.drawCDF()
fig = plt.figure(figsize=(10, 4))
plt.suptitle(str(copula))
pdf_axis = fig.add_subplot(121)
cdf_axis = fig.add_subplot(122)
View(pdf_graph, figure=fig, axes=[pdf_axis], add_legend=False)
View(cdf_graph, figure=fig, axes=[cdf_axis], add_legend=False)
pdf_axis.set_aspect('equal')
cdf_axis.set_aspect('equal')
Example #18
# %%
import openturns as ot
ot.Log.Show(ot.Log.NONE)

# %%
# A typical example
# -----------------

# %%
# A recurring issue in uncertainty quantification is to perform analysis on an output variable of interest Y obtained through a model `f` applied to input parameters `X`.
# Here we consider the input parameters to be two independent standard Normal variables :math:`X=(X_1, X_2)`. We therefore use an `IndependentCopula` to describe the link between the two marginals.
#

# input parameters
inputDist = ot.ComposedDistribution([ot.Normal()] * 2, ot.IndependentCopula(2))
inputDist.setDescription(['X1', 'X2'])

# %%
# We create a vector from the 2D distribution created before:

# %%
inputVector = ot.RandomVector(inputDist)


# %%
# Suppose our model `f` is known and reads as:
#
# .. math::
#    f(X) = \begin{pmatrix}
#             x_1^2 + x_2 \\
Example #19
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot
import math as m

# ot.Log.Show(ot.Log.ALL)

coll = []

# case 1: no transformation
coll.append([ot.Normal(), ot.Normal()])

# case 2: same copula
left = ot.ComposedDistribution(
    [ot.Normal(), ot.Gumbel()], ot.IndependentCopula(2))
right = ot.ComposedDistribution([ot.Triangular()] * 2, ot.IndependentCopula(2))
coll.append([left, right])

# case 3: same standard space
left = ot.ComposedDistribution(
    [ot.Normal(), ot.Gumbel()], ot.IndependentCopula(2))
right = ot.ComposedDistribution([ot.Triangular()] * 2, ot.GumbelCopula())
coll.append([left, right])

# TODO case 4: different standard space

for left, right in coll:
    transformation = ot.DistributionTransformation(left, right)
    print('left=', left)
    print('right=', right)
Example #20
import openturns as ot
from matplotlib import pyplot as plt
from openturns.viewer import View
if ot.LogNormal().__class__.__name__ == 'Bernoulli':
    distribution = ot.Bernoulli(0.7)
elif ot.LogNormal().__class__.__name__ == 'Binomial':
    distribution = ot.Binomial(5, 0.2)
elif ot.LogNormal().__class__.__name__ == 'ComposedDistribution':
    copula = ot.IndependentCopula(2)
    marginals = [ot.Uniform(1.0, 2.0), ot.Normal(2.0, 3.0)]
    distribution = ot.ComposedDistribution(marginals, copula)
elif ot.LogNormal().__class__.__name__ == 'CumulativeDistributionNetwork':
    coll = [ot.Normal(2),ot.Dirichlet([0.5, 1.0, 1.5])]
    distribution = ot.CumulativeDistributionNetwork(coll, ot.BipartiteGraph([[0,1], [0,1]]))
elif ot.LogNormal().__class__.__name__ == 'Histogram':
    distribution = ot.Histogram([-1.0, 0.5, 1.0, 2.0], [0.45, 0.4, 0.15])
elif ot.LogNormal().__class__.__name__ == 'KernelMixture':
    kernel = ot.Uniform()
    sample = ot.Normal().getSample(5)
    bandwidth = [1.0]
    distribution = ot.KernelMixture(kernel, bandwidth, sample)
elif ot.LogNormal().__class__.__name__ == 'MaximumDistribution':
    coll = [ot.Uniform(2.5, 3.5), ot.LogUniform(1.0, 1.2), ot.Triangular(2.0, 3.0, 4.0)]
    distribution = ot.MaximumDistribution(coll)
elif ot.LogNormal().__class__.__name__ == 'Multinomial':
    distribution = ot.Multinomial(5, [0.2])
elif ot.LogNormal().__class__.__name__ == 'RandomMixture':
    coll = [ot.Triangular(0.0, 1.0, 5.0), ot.Uniform(-2.0, 2.0)]
    weights = [0.8, 0.2]
    cst = 3.0
    distribution = ot.RandomMixture(coll, weights, cst)
Example #21
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot
import math as m

#ot.Log.Show(ot.Log.ALL)

coll = []

# case 1: no transformation
coll.append([ot.Normal(), ot.Normal()])

# case 2: same copula
left = ot.ComposedDistribution([ot.Normal(), ot.Gumbel()], ot.IndependentCopula(2))
right = ot.ComposedDistribution([ot.Triangular()]*2, ot.IndependentCopula(2))
coll.append([left, right])

# case 3: same standard space
left = ot.ComposedDistribution([ot.Normal(), ot.Gumbel()], ot.IndependentCopula(2))
right = ot.ComposedDistribution([ot.Triangular()]*2, ot.GumbelCopula())
coll.append([left, right])

#TODO case 4: different standard space

for left, right in coll:
    transformation = ot.DistributionTransformation(left, right)
    print('left=', left)
    print('right=', right)
    print('transformation=', transformation)
    inverseTransformation = transformation.inverse()