import openturns as ot
import numpy as np


def discretizeFromJoint(fullDistribution, ticks):
    # Discretize the conditional law of the last component of the joint
    # distribution given the first (dimension - 1) components.
    fullDimension = fullDistribution.getDimension()
    conditioningDistribution = fullDistribution.getMarginal(
        [i for i in range(fullDimension - 1)])
    # Add the range bounds to the given ticks
    lower = fullDistribution.getRange().getLowerBound()
    upper = fullDistribution.getRange().getUpperBound()
    expandedTicks = [0] * len(ticks)
    for i in range(fullDimension):
        expandedTicks[i] = [lower[i]] + ticks[i] + [upper[i]]
    # Now perform the full discretization
    lengths = [(len(t) - 1) for t in expandedTicks]
    tuples = ot.Tuples(lengths).generate()
    probabilities = ot.Point(len(tuples))
    for i in range(len(tuples)):
        tuple = tuples[i]
        aFull = [expandedTicks[j][tuple[j]] for j in range(fullDimension)]
        bFull = [expandedTicks[j][tuple[j] + 1] for j in range(fullDimension)]
        aConditioning = [
            expandedTicks[j][tuple[j]] for j in range(fullDimension - 1)
        ]
        bConditioning = [
            expandedTicks[j][tuple[j] + 1] for j in range(fullDimension - 1)
        ]
        den = conditioningDistribution.computeProbability(
            ot.Interval(aConditioning, bConditioning))
        if den > 0.0:
            num = fullDistribution.computeProbability(
                ot.Interval(aFull, bFull))
            probabilities[i] = num / den
    return probabilities
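
# Minimal usage sketch (not part of the original module): discretize a 2D
# standard normal joint distribution on a hand-picked grid. The distribution
# and the tick values below are illustrative assumptions only.
jointExample = ot.ComposedDistribution([ot.Normal(0.0, 1.0)] * 2)
# Interior ticks per component; the range bounds are appended automatically.
exampleTicks = [[-1.0, 0.0, 1.0], [0.0]]
print(discretizeFromJoint(jointExample, exampleTicks))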
def discretizeBernoulliFromConditionalProbability(conditionalProbability,
                                                  conditioningDistribution,
                                                  ticks,
                                                  useSlowIntegration=True,
                                                  nodesNumber=32):
    conditioningDimension = conditioningDistribution.getDimension()
    if useSlowIntegration:
        # Accurate but slow
        integrator = ot.IteratedQuadrature()
    else:
        # Less accurate for non-smooth integrands but fast
        ot.ResourceMap.SetAsUnsignedInteger(
            "GaussLegendre-DefaultMarginalIntegrationPointsNumber",
            nodesNumber)
        integrator = ot.GaussLegendre(conditioningDimension)
    # Add the range bounds to the given ticks
    lower = conditioningDistribution.getRange().getLowerBound()
    upper = conditioningDistribution.getRange().getUpperBound()
    expandedTicks = [0] * len(ticks)
    for i in range(conditioningDimension):
        expandedTicks[i] = [lower[i]] + ticks[i] + [upper[i]]
    # Now perform the full discretization
    lengths = [(len(t) - 1) for t in expandedTicks]
    tuples = ot.Tuples(lengths).generate()
    probabilitiesTrue = [0] * len(tuples)

    def kernel(x):
        x = np.array(x)
        return conditionalProbability(x) * np.array(
            conditioningDistribution.computePDF(
                x[:, 0:conditioningDimension]))

    for i in range(len(tuples)):
        tuple = tuples[i]
        aConditioning = [
            expandedTicks[j][tuple[j]] for j in range(conditioningDimension)
        ]
        bConditioning = [
            expandedTicks[j][tuple[j] + 1]
            for j in range(conditioningDimension)
        ]
        den = conditioningDistribution.computeProbability(
            ot.Interval(aConditioning, bConditioning))
        if den > 0.0:
            num = integrator.integrate(
                ot.PythonFunction(conditioningDimension, 1,
                                  func_sample=kernel),
                ot.Interval(aConditioning, bConditioning))[0]
            probabilitiesTrue[i] = min(1.0, num / den)
    probabilities = ot.Point([1.0 - p for p in probabilitiesTrue]
                             + probabilitiesTrue)
    return probabilities
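
# Minimal usage sketch (not part of the original module): discretize the
# Bernoulli law P(Y=1 | X) for an assumed logistic link with X ~ Normal(0, 1).
# The link function and the tick values are illustrative assumptions only.
def logisticLink(x):
    x = np.array(x)
    return 1.0 / (1.0 + np.exp(-x[:, 0:1]))


print(discretizeBernoulliFromConditionalProbability(
    logisticLink, ot.Normal(0.0, 1.0), [[-1.0, 0.0, 1.0]]))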
def discretizeFromConditionalDensity(conditionalDensity,
                                     conditioningDistribution,
                                     ticks,
                                     useSlowIntegration=True,
                                     nodesNumber=32):
    fullDimension = conditioningDistribution.getDimension() + 1
    if useSlowIntegration:
        # Accurate but slow
        integrator = ot.IteratedQuadrature()
    else:
        # Less accurate for non-smooth integrands but fast
        ot.ResourceMap.SetAsUnsignedInteger(
            "GaussLegendre-DefaultMarginalIntegrationPointsNumber",
            nodesNumber)
        integrator = ot.GaussLegendre(fullDimension)
    # Add the range bounds to the given ticks
    lower = list(conditioningDistribution.getRange().getLowerBound())
    upper = list(conditioningDistribution.getRange().getUpperBound())
    # For the conditioned variable the bounds have to be estimated. We assume
    # that the given tick range provides a sufficient margin to get the lower
    # and upper bounds.
    conditionedMin = min(ticks[fullDimension - 1])
    conditionedMax = max(ticks[fullDimension - 1])
    delta = conditionedMax - conditionedMin
    lower = lower + [conditionedMin - delta]
    upper = upper + [conditionedMax + delta]
    expandedTicks = [0] * fullDimension
    for i in range(fullDimension):
        expandedTicks[i] = [lower[i]] + ticks[i] + [upper[i]]
    # Now perform the full discretization
    lengths = [(len(t) - 1) for t in expandedTicks]
    tuples = ot.Tuples(lengths).generate()
    probabilities = ot.Point(len(tuples))

    def kernel(x):
        x = np.array(x)
        return conditionalDensity(x) * np.array(
            conditioningDistribution.computePDF(x[:, 0:fullDimension - 1]))

    for i in range(len(tuples)):
        tuple = tuples[i]
        aFull = [expandedTicks[j][tuple[j]] for j in range(fullDimension)]
        bFull = [expandedTicks[j][tuple[j] + 1] for j in range(fullDimension)]
        num = integrator.integrate(
            ot.PythonFunction(fullDimension, 1, func_sample=kernel),
            ot.Interval(aFull, bFull))[0]
        probabilities[i] = num
    return probabilities
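
# Minimal usage sketch (not part of the original module): discretize the law of
# Y given X when Y | X=x ~ Normal(x, 1) and X ~ Normal(0, 1). The conditional
# density below and the tick values are illustrative assumptions only.
def gaussianConditionalDensity(x):
    x = np.array(x)
    return np.exp(-0.5 * (x[:, 1:2] - x[:, 0:1]) ** 2) / np.sqrt(2.0 * np.pi)


print(discretizeFromConditionalDensity(
    gaussianConditionalDensity, ot.Normal(0.0, 1.0),
    [[-1.0, 1.0], [-1.0, 1.0]]))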
import openturns as ot
from openturns.viewer import View

# Tuples: the full factorial grid over {0,1,2} x {0,1,2,3} x {0,1,2,3,4},
# i.e. 3 * 4 * 5 = 60 points.
d = ot.Tuples([3, 4, 5])
s = ot.Sample(d.generate())
s.setDescription(["X1", "X2", "X3"])
g = ot.Graph()
g.setTitle("Tuples generator")
g.setGridColor("black")
p = ot.Pairs(s)
g.add(p)
View(g)
' + 17*x1^3 - 10*x2^3 + 7*x4^3'])
Y = myLinearModel(X) + R
print(Y)

################################################################################################
# Build a model Y~(X1+X2+X3+X4)^3+I(Xi)^2+I(Xi)^3
dim = X.getDimension()
enumerateFunction = ot.EnumerateFunction(dim)
factory = ot.OrthogonalProductPolynomialFactory(
    [ot.MonomialFactory()] * dim, enumerateFunction)

# Build 'interactions' as a list of lists [a1, a2, a3, a4]; each one yields the
# tensorized polynomial x1^a1*x2^a2*x3^a3*x4^a4.
# Y ~ (X1+X2+X3+X4)^4
interactions = [x for x in ot.Tuples([2] * dim).generate()]
# Remove X1*X2*X3*X4 to obtain Y ~ (X1+X2+X3+X4)^3
interactions.pop(interactions.index([1] * dim))
for i in range(dim):
    indices = [0] * dim
    indices[i] = 2
    # Y ~ I(Xi)^2
    interactions.append(indices[:])
    # Y ~ I(Xi)^3
    indices[i] = 3
    interactions.append(indices[:])

basis = ot.Basis([factory.build(enumerateFunction.inverse(indices))
                  for indices in interactions])

################################################################################################
i_min = [interactions.index([0, 0, 0, 0])]
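
# Illustrative check (an addition, assuming the 4-input setup above): a
# multi-index such as [1, 0, 2, 0] maps, through the enumerate function, to the
# tensorized monomial x1 * x3^2 in the x1..x4 naming used in the comments.
print(factory.build(enumerateFunction.inverse([1, 0, 2, 0])))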
#
# - The Combinations generator, which allows one to generate all the subsets of size :math:`k` of :math:`\{0,\dots,n-1\}`.
#
#   The total number of generated points is :math:`N=\dfrac{n!}{k!(n-k)!}`.
#

# %%
import openturns as ot
import openturns.viewer as viewer
from matplotlib import pylab as plt
import math as m

ot.Log.Show(ot.Log.NONE)

# %%
# Tuples
# ------
experiment = ot.Tuples([2, 3, 5])
print(experiment.generate())

# %%
# K-permutations
# --------------
experiment = ot.KPermutations(3, 4)
print(experiment.generate())

# %%
# Combinations
# ------------
experiment = ot.Combinations(4, 6)
print(experiment.generate())
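
# %%
# As a quick sanity check (an illustrative addition, reusing :math:`k=4` and
# :math:`n=6` from above), the size of the generated design can be compared
# with :math:`N=\dfrac{n!}{k!(n-k)!}`.
k, n = 4, 6
print(experiment.generate().getSize())
print(m.factorial(n) // (m.factorial(k) * m.factorial(n - k)))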
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

generators = [
    ot.Tuples([4, 6, 9]),
    ot.KPermutations(4, 6),
    ot.Combinations(4, 6)
]

for generator in generators:
    print('generator:', generator)
    subsets = generator.generate()
    print('subset:', subsets)