Example #1
    def __init__(self, beta_coeff, idx_set, jpdf):
        self.beta_coeff = beta_coeff
        self.idx_set = idx_set
        self.jpdf = jpdf
        self.N = jpdf.getDimension()

        # get the distribution type of each random variable
        dist_types = []
        for i in range(self.N):
            dist_type = self.jpdf.getMarginal(i).getName()
            dist_types.append(dist_type)

        # create orthogonal univariate bases
        poly_collection = ot.PolynomialFamilyCollection(self.N)
        for i in range(self.N):
            pdf = jpdf.getDistributionCollection()[i]
            algo = ot.AdaptiveStieltjesAlgorithm(pdf)
            poly_collection[i] = ot.StandardDistributionPolynomialFactory(algo)

        # create multivariate basis
        multivariate_basis = ot.OrthogonalProductPolynomialFactory(
            poly_collection, ot.EnumerateFunction(self.N))
        # get enumerate function (multi-index handling)
        enum_func = multivariate_basis.getEnumerateFunction()
        # get expansion
        self.expansion = multivariate_basis.getSubBasis(
            transform_multi_index_set(idx_set, enum_func))
        # create openturns surrogate model
        sur_model = ot.FunctionCollection()
        for i in range(len(self.expansion)):
            multi = str(beta_coeff[i]) + '*x'
            help_function = ot.SymbolicFunction(['x'], [multi])
            sur_model.add(ot.ComposedFunction(help_function,
                                              self.expansion[i]))
        self.surrogate_model = np.sum(sur_model)
Example #2
    def fit(self, X, y, **fit_params):
        input_dimension = X.shape[1]
        if self.distribution is None:
            self.distribution = BuildDistribution(X)
        if self.enumerate == 'linear':
            enumerateFunction = ot.LinearEnumerateFunction(input_dimension)
        elif self.enumerate == 'hyperbolic':
            enumerateFunction = ot.HyperbolicAnisotropicEnumerateFunction(input_dimension, self.q)
        else:
            raise ValueError('enumerate should be "linear" or "hyperbolic"')
        polynomials = [ot.StandardDistributionPolynomialFactory(self.distribution.getMarginal(i)) for i in range(input_dimension)]
        productBasis = ot.OrthogonalProductPolynomialFactory(polynomials, enumerateFunction)
        adaptiveStrategy = ot.FixedStrategy(productBasis, enumerateFunction.getStrataCumulatedCardinal(self.degree))
        if self.sparse:
            projectionStrategy = ot.LeastSquaresStrategy(ot.LeastSquaresMetaModelSelectionFactory(ot.LARS(), ot.CorrectedLeaveOneOut()))
        else:
            projectionStrategy = ot.LeastSquaresStrategy(X, y.reshape(-1, 1))
        algo = ot.FunctionalChaosAlgorithm(X, y.reshape(-1, 1), self.distribution, adaptiveStrategy, projectionStrategy)
        algo.run()
        self._result = algo.getResult()
        output_dimension = self._result.getMetaModel().getOutputDimension()

        # sensitivity
        si = ot.FunctionalChaosSobolIndices(self._result)
        if output_dimension == 1:
            self.feature_importances_ = [si.getSobolIndex(i) for i in range(input_dimension)]
        else:
            self.feature_importances_ = [[0.0] * input_dimension
                                         for _ in range(output_dimension)]
            for k in range(output_dimension):
                for i in range(input_dimension):
                    self.feature_importances_[k][i] = si.getSobolIndex(i, k)
        self.feature_importances_ = np.array(self.feature_importances_)
        return self
Example #3
    def _buildChaosAlgo(self, inputSample, outputSample):
        """
        Build the functional chaos algorithm without running it.
        """
        if self._distribution is None:
            # create default distribution : Uniform between min and max of the
            # input sample
            inputSample = ot.NumericalSample(inputSample)
            inputMin = inputSample.getMin()
            inputMin[0] = np.min(self._defectSizes)
            inputMax = inputSample.getMax()
            inputMax[0] = np.max(self._defectSizes)
            marginals = [
                ot.Uniform(inputMin[i], inputMax[i]) for i in range(self._dim)
            ]
            self._distribution = ot.ComposedDistribution(marginals)

        # put the description of the inputSample into the description of the distribution
        self._distribution.setDescription(inputSample.getDescription())

        if self._adaptiveStrategy is None:
            # Create the adaptive strategy: the default is a fixed strategy
            # with a linear enumerate function, truncated at self._degree
            polyCol = [0.] * self._dim
            for i in range(self._dim):
                polyCol[i] = ot.StandardDistributionPolynomialFactory(
                    self._distribution.getMarginal(i))

            enumerateFunction = ot.EnumerateFunction(self._dim)
            multivariateBasis = ot.OrthogonalProductPolynomialFactory(
                polyCol, enumerateFunction)
            # default degree is 3 (in __init__)
            indexMax = enumerateFunction.getStrataCumulatedCardinal(
                self._degree)
            self._adaptiveStrategy = ot.FixedStrategy(multivariateBasis,
                                                      indexMax)

        if self._projectionStrategy is None:
            # sparse polynomial chaos
            basis_sequence_factory = ot.LAR()
            fitting_algorithm = ot.KFold()
            approximation_algorithm = ot.LeastSquaresMetaModelSelectionFactory(
                basis_sequence_factory, fitting_algorithm)
            self._projectionStrategy = ot.LeastSquaresStrategy(
                inputSample, outputSample, approximation_algorithm)

        return ot.FunctionalChaosAlgorithm(inputSample, outputSample, \
                self._distribution, self._adaptiveStrategy, self._projectionStrategy)
Example #4
# Get input & output sample
lhs = ot.LHSExperiment(distribution, samplingSize)
inputSample = lhs.generate()
outputSample = model(inputSample)

# Validation of results on independent samples
validationSize = 10
inputValidation = distribution.getSample(validationSize)
outputValidation = model(inputValidation)

# 1) SPC algorithm
# Create the orthogonal basis
polynomialCollection = [ot.LegendreFactory()] * dimension

enumerateFunction = ot.LinearEnumerateFunction(dimension)
productBasis = ot.OrthogonalProductPolynomialFactory(
    polynomialCollection, enumerateFunction)

# Create the adaptive strategy
degree = 8
basisSize = enumerateFunction.getStrataCumulatedCardinal(degree)
adaptiveStrategy = ot.FixedStrategy(productBasis, basisSize)

# Select the fitting algorithm
fittingAlgorithm = ot.KFold()
leastSquaresFactory = ot.LeastSquaresMetaModelSelectionFactory(
    ot.LARS(), fittingAlgorithm)

# Projection strategy
projectionStrategy = ot.LeastSquaresStrategy(
    inputSample, outputSample, leastSquaresFactory)
Example #5
from math import pi

import openturns as ot
from openturns.viewer import View
# Create the function
ot.RandomGenerator.SetSeed(0)
formula = ['sin(X1) + 7. * sin(X2)^2 + 0.1 * X3^4 * sin(X1)']
input_names = ['X1', 'X2', 'X3']
g = ot.SymbolicFunction(input_names, formula)
# Create the probabilistic model
distributionList = [ot.Uniform(-pi, pi)] * 3
distribution = ot.ComposedDistribution(distributionList)
# Create a training sample
N = 500
inputTrain = distribution.getSample(N)
outputTrain = g(inputTrain)
# Create the chaos
multivariateBasis = ot.OrthogonalProductPolynomialFactory(distributionList)
selectionAlgorithm = ot.LeastSquaresMetaModelSelectionFactory()
projectionStrategy = ot.LeastSquaresStrategy(inputTrain, outputTrain,
                                             selectionAlgorithm)
totalDegree = 8
enumfunc = multivariateBasis.getEnumerateFunction()
P = enumfunc.getStrataCumulatedCardinal(totalDegree)
adaptiveStrategy = ot.FixedStrategy(multivariateBasis, P)
chaosalgo = ot.FunctionalChaosAlgorithm(inputTrain, outputTrain, distribution,
                                        adaptiveStrategy, projectionStrategy)
chaosalgo.run()
result = chaosalgo.getResult()
# Draw Sobol' indices
chaosSI = ot.FunctionalChaosSobolIndices(result)
dim_input = distribution.getDimension()
first_order = [chaosSI.getSobolIndex(i) for i in range(dim_input)]
Example #6
#
# Then we create the multivariate basis onto which the function is expanded.
# By default, it is associated with the linear enumeration rule. Since our
# marginals are uniform, the :class:`~openturns.OrthogonalProductPolynomialFactory` class
# produces Legendre polynomials.
# In order to create the multivariate basis of polynomials, we must specify the
# number of functions in the basis. In this particular case, we compute that
# number depending on the total degree.
# The :meth:`~openturns.EnumerateFunction.getMaximumDegreeStrataIndex` method
# of the enumeration function computes the number of layers necessary to achieve
# that total degree. Then the number of functions up to that layer is computed
# with the :meth:`~openturns.EnumerateFunction.getStrataCumulatedCardinal` method.

# %%
dimension = im.distributionX.getDimension()
multivariateBasis = ot.OrthogonalProductPolynomialFactory(
    [im.distributionX.getMarginal(i) for i in range(dimension)])

totalDegree = 5  # Polynomial degree
enumerate_function = multivariateBasis.getEnumerateFunction()
strataIndex = enumerate_function.getMaximumDegreeStrataIndex(totalDegree)
print("strataIndex = ", strataIndex)
number_of_terms_in_basis = enumerate_function.getStrataCumulatedCardinal(
    strataIndex)
print("number_of_terms_in_basis = ", number_of_terms_in_basis)
adaptiveStrategy = ot.FixedStrategy(multivariateBasis,
                                    number_of_terms_in_basis)
print(adaptiveStrategy)
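
To make the two counting methods concrete, here is a minimal sketch (not part of the original example; it assumes only `import openturns as ot`) that prints the multi-indices in the first strata of a linear enumeration rule in dimension 3. With this rule, stratum d gathers exactly the multi-indices of total degree d, so `getStrataCumulatedCardinal(d)` is the basis size up to degree d.

import openturns as ot

enum = ot.LinearEnumerateFunction(3)
index = 0
for stratum in range(3):
    # each stratum of the linear rule holds the multi-indices of one total degree
    cardinal = enum.getStrataCardinal(stratum)
    print("stratum", stratum, ":",
          [list(enum(index + k)) for k in range(cardinal)])
    index += cardinal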

# %%
#
# We compute the coefficients using a multivariate tensor product Gaussian
Example #7
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

try:
    ot.ResourceMap.SetAsScalar(
        "LinearCombinationEvaluation-SmallCoefficient", 1.0e-10)
    domain = ot.Interval(-1.0, 1.0)
    basis = ot.OrthogonalProductPolynomialFactory([ot.LegendreFactory()])
    basisSize = 5
    experiment = ot.LHSExperiment(basis.getMeasure(), 100)
    mustScale = False
    threshold = 0.0001
    model = ot.AbsoluteExponential([1.0])
    algo = ot.KarhunenLoeveQuadratureAlgorithm(
        domain, model, experiment, basis, basisSize, mustScale, threshold)
    algo.run()
    result = algo.getResult()
    lambd = result.getEigenValues()
    KLModes = result.getModesAsProcessSample()
    print("KL modes=", KLModes)
    print("KL eigenvalues=", lambd)
    process = ot.GaussianProcess(model, KLModes.getMesh())
    sample = process.getSample(10)
    coefficients = result.project(sample)
    print("KL coefficients=", coefficients)
    KLFunctions = result.getModes()
    print("KL functions=", KLFunctions)
    print("KL lift=", result.lift(coefficients[0]))
    print("KL lift as field=", result.liftAsField(coefficients[0]))
Example #8
# %%
# Prepare the input/output samples
sampleSize = 250
X = distribution.getSample(sampleSize)
Y = myModel(X)
dimension = X.getDimension()

# %%
# build the orthogonal basis
coll = [
    ot.StandardDistributionPolynomialFactory(distribution.getMarginal(i))
    for i in range(dimension)
]
enumerateFunction = ot.LinearEnumerateFunction(dimension)
productBasis = ot.OrthogonalProductPolynomialFactory(coll, enumerateFunction)

# %%
# create the algorithm
degree = 6
adaptiveStrategy = ot.FixedStrategy(
    productBasis, enumerateFunction.getStrataCumulatedCardinal(degree))
projectionStrategy = ot.LeastSquaresStrategy()
algo = ot.FunctionalChaosAlgorithm(X, Y, distribution, adaptiveStrategy,
                                   projectionStrategy)
algo.run()

# %%
# get the metamodel function
result = algo.getResult()
metamodel = result.getMetaModel()
Example #9
    def __init__(self, strategy, degree, distributions, N_quad=None, sample=None,
                 stieltjes=True, sparse_param={}):
        """Generate truncature and projection strategies.

        Along with the strategies, the sample is stored in the attribute
        :attr:`sample`, together with the corresponding weights :attr:`weights`.

        :param str strategy: Least square or Quadrature ['LS', 'Quad', 'SparseLS'].
        :param int degree: Polynomial degree.
        :param  distributions: Distributions of each input parameter.
        :type distributions: lst(:class:`openturns.Distribution`)
        :param array_like sample: Samples for least square
          (n_samples, n_features).
        :param bool stieltjes: Whether to use the Stieltjes algorithm for the basis.
        :param dict sparse_param: Parameters for the Sparse Cleaning Truncation
          Strategy and/or hyperbolic truncation of the initial basis.

            - **max_considered_terms** (int) -- Maximum Considered Terms,
            - **most_significant** (int) -- Most Significant number to retain,
            - **significance_factor** (float) -- Significance Factor,
            - **hyper_factor** (float) -- factor for the hyperbolic truncation
              strategy.
        """
        # distributions
        self.in_dim = len(distributions)
        self.dist = ot.ComposedDistribution(distributions)
        self.sparse_param = sparse_param

        if 'hyper_factor' in self.sparse_param:
            enumerateFunction = ot.EnumerateFunction(self.in_dim, self.sparse_param['hyper_factor'])
        else:
            enumerateFunction = ot.EnumerateFunction(self.in_dim)

        if stieltjes:
            # Tends to cause performance issues
            self.basis = ot.OrthogonalProductPolynomialFactory(
                [ot.StandardDistributionPolynomialFactory(
                    ot.AdaptiveStieltjesAlgorithm(marginal))
                 for marginal in distributions], enumerateFunction)
        else:
            self.basis = ot.OrthogonalProductPolynomialFactory(
                [ot.StandardDistributionPolynomialFactory(margin)
                 for margin in distributions], enumerateFunction)

        self.n_basis = enumerateFunction.getStrataCumulatedCardinal(degree)

        # Strategy choice for expansion coefficient determination
        self.strategy = strategy
        if self.strategy == 'LS' or self.strategy == 'SparseLS':  # least-squares method
            self.sample = sample
        else:  # integration method
            # redefinition of sample size
            # n_samples = (degree + 1) ** self.in_dim
            # marginal degree definition
            # by default: the marginal degree for each input random
            # variable is set to the total polynomial degree 'degree'+1
            measure = self.basis.getMeasure()

            if N_quad is not None:
                degrees = [int(N_quad ** 0.25)] * self.in_dim
            else:
                degrees = [degree + 1] * self.in_dim

            self.proj_strategy = ot.IntegrationStrategy(
                ot.GaussProductExperiment(measure, degrees))
            self.sample, self.weights = self.proj_strategy.getExperiment().generateWithWeights()

            if not stieltjes:
                transformation = ot.Function(ot.MarginalTransformationEvaluation(
                    [measure.getMarginal(i) for i in range(self.in_dim)],
                    distributions, False))
                self.sample = transformation(self.sample)

        self.pc = None
        self.pc_result = None
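
A hypothetical usage sketch for this constructor; the listing does not show the enclosing class, so the name `PC` below is an assumption:

import openturns as ot

distributions = [ot.Uniform(-1.0, 1.0), ot.Normal(0.0, 1.0)]
# quadrature strategy: the sample and weights are generated internally
pc_quad = PC(strategy='Quad', degree=3, distributions=distributions)
# least-squares strategy: the sample is supplied by the caller
sample = ot.ComposedDistribution(distributions).getSample(100)
pc_ls = PC(strategy='LS', degree=3, distributions=distributions, sample=sample)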
Example #10
def dali_pce(func,
             N,
             jpdf_cp,
             jpdf_ot,
             tol=1e-12,
             max_fcalls=1000,
             verbose=True,
             interp_dict={}):

    if not interp_dict:  # if dictionary is empty --> cold-start
        idx_act = []  # M_activated x N
        idx_adm = []  # M_admissible x N
        fevals_act = []  # M_activated x 1
        fevals_adm = []  # M_admissible x 1
        coeff_act = []  # M_activated x 1
        coeff_adm = []  #  M_admissible x 1

        # start with 0 multi-index
        knot0 = []
        for n in range(N):
            # get knots per dimension based on maximum index
            kk, ww = seq_lj_1d(order=0, dist=jpdf_cp[n])
            knot0.append(kk[0])
        feval = func(knot0)

        # update activated sets
        idx_act.append([0] * N)
        coeff_act.append(feval)
        fevals_act.append(feval)

        # local error indicators
        local_error_indicators = np.abs(coeff_act)

        # get the OT distribution type of each random variable
        dist_types = []
        for i in range(N):
            dist_type = jpdf_ot.getMarginal(i).getName()
            dist_types.append(dist_type)

        # create orthogonal univariate bases
        poly_collection = ot.PolynomialFamilyCollection(N)
        for i in range(N):
            if dist_types[i] == 'Uniform':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.LegendreFactory())
            elif dist_types[i] == 'Normal':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.HermiteFactory())
            elif dist_types[i] == 'Beta':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.JacobiFactory())
            elif dist_types[i] == 'Gamma':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.LaguerreFactory())
            else:
                pdf = jpdf_ot.getDistributionCollection()[i]
                algo = ot.AdaptiveStieltjesAlgorithm(pdf)
                poly_collection[i] = ot.StandardDistributionPolynomialFactory(
                    algo)

        # create multivariate basis
        mv_basis = ot.OrthogonalProductPolynomialFactory(
            poly_collection, ot.EnumerateFunction(N))
        # get enumerate function (multi-index handling)
        enum_func = mv_basis.getEnumerateFunction()

    else:
        idx_act = interp_dict['idx_act']
        idx_adm = interp_dict['idx_adm']
        coeff_act = interp_dict['coeff_act']
        coeff_adm = interp_dict['coeff_adm']
        fevals_act = interp_dict['fevals_act']
        fevals_adm = interp_dict['fevals_adm']
        mv_basis = interp_dict['mv_basis']
        enum_func = interp_dict['enum_func']
        # local error indicators
        local_error_indicators = np.abs(coeff_adm)

    # compute global error indicator
    global_error_indicator = local_error_indicators.sum()  # max or sum

    # fcalls / M approx. terms up to now
    fcalls = len(idx_act) + len(idx_adm)  # fcalls = M --> approx. terms

    # maximum index per dimension
    max_idx_per_dim = np.max(idx_act + idx_adm, axis=0)

    # univariate knots and polynomials per dimension
    knots_per_dim = {}
    for n in range(N):
        kk, ww = seq_lj_1d(order=max_idx_per_dim[n], dist=jpdf_cp[n])
        knots_per_dim[n] = kk

    # start iterations
    while global_error_indicator > tol and fcalls < max_fcalls:
        if verbose:
            print(fcalls)
            print(global_error_indicator)

        # the index added last to the activated set is the one to be refined
        last_act_idx = idx_act[-1][:]
        # compute the knot corresponding to the lastly added index
        last_knot = [
            knots_per_dim[n][i] for n, i in zip(range(N), last_act_idx)
        ]
        # get admissible neighbors of the lastly added index
        adm_neighbors = admissible_neighbors(last_act_idx, idx_act)

        for an in adm_neighbors:
            # update admissible index set
            idx_adm.append(an)
            # find which parameter/direction n (n=1,2,...,N) gets refined
            n_ref = np.argmin(
                [idx1 == idx2 for idx1, idx2 in zip(an, last_act_idx)])
            # sequence of 1d Leja nodes/weights for the given refinement
            knots_n, weights_n = seq_lj_1d(an[n_ref], jpdf_cp[int(n_ref)])

            # update max_idx_per_dim, knots_per_dim, if necessary
            if an[n_ref] > max_idx_per_dim[n_ref]:
                max_idx_per_dim[n_ref] = an[n_ref]
                knots_per_dim[n_ref] = knots_n

            # find new_knot and compute function on new_knot
            new_knot = last_knot[:]
            new_knot[n_ref] = knots_n[-1]
            feval = func(new_knot)
            fevals_adm.append(feval)
            fcalls += 1  # update function calls

        # create PCE basis
        idx_system = idx_act + idx_adm
        idx_system_single = transform_multi_index_set(idx_system, enum_func)
        system_basis = mv_basis.getSubBasis(idx_system_single)
        # get corresponding evaluations
        fevals_system = fevals_act + fevals_adm
        # multi-dimensional knots
        M = len(idx_system)  # equations terms
        knots_md = [[knots_per_dim[n][idx_system[m][n]] for m in range(M)]
                    for n in range(N)]
        knots_md = np.array(knots_md).T
        # design matrix
        D = get_design_matrix(system_basis, knots_md)
        # solve system of equations
        Q, R = scl.qr(D, mode='economic')
        c = Q.T.dot(fevals_system)
        coeff_system = scl.solve_triangular(R, c)

        # find the multi-index with the largest contribution, add it to idx_act
        # and delete it from idx_adm
        coeff_act = coeff_system[:len(idx_act)].tolist()
        coeff_adm = coeff_system[-len(idx_adm):].tolist()
        help_idx = np.argmax(np.abs(coeff_adm))
        idx_add = idx_adm.pop(help_idx)
        pce_coeff_add = coeff_adm.pop(help_idx)
        fevals_add = fevals_adm.pop(help_idx)
        idx_act.append(idx_add)
        coeff_act.append(pce_coeff_add)
        fevals_act.append(fevals_add)
        # re-compute coefficients of admissible multi-indices

        # local error indicators
        local_error_indicators = np.abs(coeff_adm)
        # compute global error indicator
        global_error_indicator = local_error_indicators.sum()  # max or sum

    # store expansion data in dictionary
    interp_dict = {}
    interp_dict['idx_act'] = idx_act
    interp_dict['idx_adm'] = idx_adm
    interp_dict['coeff_act'] = coeff_act
    interp_dict['coeff_adm'] = coeff_adm
    interp_dict['fevals_act'] = fevals_act
    interp_dict['fevals_adm'] = fevals_adm
    interp_dict['enum_func'] = enum_func
    interp_dict['mv_basis'] = mv_basis
    return interp_dict
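
A hypothetical call sketch for `dali_pce`; the model `meroND` (used further down in this listing) stands for any scalar function, and the two joint densities are assumptions. Note that `jpdf_cp` is a chaospy joint distribution used for the Leja sequences, while `jpdf_ot` is its OpenTURNS counterpart used to build the basis:

import chaospy as cp
import openturns as ot

N = 2
jpdf_cp = cp.J(cp.Uniform(-1, 1), cp.Uniform(-1, 1))
jpdf_ot = ot.ComposedDistribution([ot.Uniform(-1.0, 1.0)] * N)
interp_dict = dali_pce(func=meroND, N=N, jpdf_cp=jpdf_cp, jpdf_ot=jpdf_ot,
                       max_fcalls=500)
# warm restart with a larger budget, reusing the stored expansion data
interp_dict = dali_pce(func=meroND, N=N, jpdf_cp=jpdf_cp, jpdf_ot=jpdf_ot,
                       max_fcalls=1000, interp_dict=interp_dict)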
Example #11
view = viewer.View(graph_ev_X)

# %%
# Polynomial chaos between KL coefficients
print("project sample_X")
sample_xi_X = result_X.project(sample_X)

print("project sample_Y")
sample_xi_Y = result_Y.project(sample_Y)

print("Compute PCE between coefficients")
degree = 1
dimension_xi_X = sample_xi_X.getDimension()
dimension_xi_Y = sample_xi_Y.getDimension()
enumerateFunction = ot.LinearEnumerateFunction(dimension_xi_X)
basis = ot.OrthogonalProductPolynomialFactory(
    [ot.HermiteFactory()] * dimension_xi_X, enumerateFunction)
basisSize = enumerateFunction.getStrataCumulatedCardinal(degree)
adaptive = ot.FixedStrategy(basis, basisSize)
projection = ot.LeastSquaresStrategy(
    ot.LeastSquaresMetaModelSelectionFactory(ot.LARS(),
                                             ot.CorrectedLeaveOneOut()))
ot.ResourceMap.SetAsScalar("LeastSquaresMetaModelSelection-ErrorThreshold",
                           1.0e-7)
algo_chaos = ot.FunctionalChaosAlgorithm(sample_xi_X, sample_xi_Y,
                                         basis.getMeasure(), adaptive,
                                         projection)
algo_chaos.run()
result_chaos = algo_chaos.getResult()
meta_model = result_chaos.getMetaModel()
print("myConvolution=", myConvolution.getInputDimension(), "->",
      myConvolution.getOutputDimension())
Example #12
    def train(self):
        self.input_dim = self.training_points[None][0][0].shape[1]
        x_train = ot.Sample(self.training_points[None][0][0])
        y_train = ot.Sample(self.training_points[None][0][1])

        # Choose the input distributions and create the joint input distribution
        distributions = []
        dist_specs = self.options["uncertainty_specs"]
        if dist_specs:
            if len(dist_specs) != self.input_dim:
                raise SurrogateOpenturnsException(
                    "Number of distributions should be equal to input "
                    "dimensions. Should be {}, got {}".format(
                        self.input_dim, len(dist_specs)
                    )
                )
            for ds in dist_specs:
                dist_klass = getattr(sys.modules["openturns"], ds["name"])
                args = [ds["kwargs"][name] for name in DISTRIBUTION_SIGNATURES[ds["name"]]]
                distributions.append(dist_klass(*args))
        else:
            for i in range(self.input_dim):
                mean = np.mean(x_train[:, i])
                lower, upper = 0.95 * mean, 1.05 * mean
                if mean < 0:
                    lower, upper = upper, lower
                distributions.append(ot.Uniform(lower, upper))

        distribution = ot.ComposedDistribution(distributions)

        # Polynomial basis
        # step 1 - Construction of the multivariate orthonormal basis:
        # build orthonormal or orthogonal univariate polynomial families
        # (each associated with the corresponding input distribution)
        polynoms = [0.0] * self.input_dim
        for i in range(distribution.getDimension()):
            polynoms[i] = ot.StandardDistributionPolynomialFactory(
                distribution.getMarginal(i)
            )
        enumerateFunction = ot.LinearEnumerateFunction(self.input_dim)
        productBasis = ot.OrthogonalProductPolynomialFactory(
            polynoms, enumerateFunction
        )

        # step 2 - Truncation strategy of the multivariate orthonormal basis:
        # a strategy must be chosen for the selection of the different terms
        # of the multivariate basis.
        # Truncation strategy of the multivariate orthonormal basis
        # We choose all the polynomials of degree <= degree
        degree = self.options["pce_degree"]
        index_max = enumerateFunction.getStrataCumulatedCardinal(degree)
        adaptive_strategy = ot.FixedStrategy(productBasis, index_max)

        basis_sequenceFactory = ot.LARS()
        fitting_algorithm = ot.CorrectedLeaveOneOut()
        approximation_algorithm = ot.LeastSquaresMetaModelSelectionFactory(
            basis_sequenceFactory, fitting_algorithm
        )
        projection_strategy = ot.LeastSquaresStrategy(
            x_train, y_train, approximation_algorithm
        )

        algo = ot.FunctionalChaosAlgorithm(
            x_train, y_train, distribution, adaptive_strategy, projection_strategy
        )
        # algo = ot.FunctionalChaosAlgorithm(X_train_NS, Y_train_NS)
        algo.run()
        self._pce_result = algo.getResult()
Example #13
    elif dist_types[i] == 'Normal':
        poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
            ot.HermiteFactory())
    elif dist_types[i] == 'Beta':
        poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
            ot.JacobiFactory())
    elif dist_types[i] == 'Gamma':
        poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
            ot.LaguerreFactory())
    else:
        pdf = jpdf_ot.getDistributionCollection()[i]
        algo = ot.AdaptiveStieltjesAlgorithm(pdf)
        poly_collection[i] = ot.StandardDistributionPolynomialFactory(algo)

# create multivariate basis
mv_basis = ot.OrthogonalProductPolynomialFactory(poly_collection,
                                                 ot.EnumerateFunction(N))
# get enumerate function (multi-index handling)
enum_func = mv_basis.getEnumerateFunction()

max_fcalls = np.linspace(100, 1000, 10).tolist()
meanz = []
varz = []
cv_errz_rms = []
cv_errz_max = []
fcallz = []
# cross validation sample
np.random.seed(42)
nsamples = 1000
cv_samples_in = jpdf.sample(nsamples).T
cv_samples_out = [meroND(sample) for sample in cv_samples_in]
for mfc in max_fcalls:
Example #14
    def fit(self, X, y, **fit_params):
        """Fit PC regression model.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Training data.
        y : array-like, shape = (n_samples, [n_output_dims])
            Target values.

        Returns
        -------
        self : returns an instance of self.

        """
        if len(X) == 0:
            raise ValueError(
                "Can not perform chaos expansion with empty sample")
        # check data type is accurate
        if (len(np.shape(X)) != 2):
            raise ValueError("X has incorrect shape.")
        input_dimension = np.shape(X)[1]
        if (len(np.shape(y)) != 2):
            raise ValueError("y has incorrect shape.")
        if self.distribution is None:
            self.distribution = ot.MetaModelAlgorithm.BuildDistribution(X)
        if self.enumeratef == 'linear':
            enumerateFunction = ot.LinearEnumerateFunction(input_dimension)
        elif self.enumeratef == 'hyperbolic':
            enumerateFunction = ot.HyperbolicAnisotropicEnumerateFunction(
                input_dimension, self.q)
        else:
            raise ValueError('enumeratef should be "linear" or "hyperbolic"')
        polynomials = [
            ot.StandardDistributionPolynomialFactory(
                self.distribution.getMarginal(i))
            for i in range(input_dimension)
        ]
        productBasis = ot.OrthogonalProductPolynomialFactory(
            polynomials, enumerateFunction)
        adaptiveStrategy = ot.FixedStrategy(
            productBasis,
            enumerateFunction.getStrataCumulatedCardinal(self.degree))
        if self.sparse:
            # Filter according to the sparse_fitting_algorithm key
            if self.sparse_fitting_algorithm == "cloo":
                fitting_algorithm = ot.CorrectedLeaveOneOut()
            else:
                fitting_algorithm = ot.KFold()
            # Define the corresponding projection strategy
            projectionStrategy = ot.LeastSquaresStrategy(
                ot.LeastSquaresMetaModelSelectionFactory(
                    ot.LARS(), fitting_algorithm))
        else:
            projectionStrategy = ot.LeastSquaresStrategy(X, y)
        algo = ot.FunctionalChaosAlgorithm(X, y, self.distribution,
                                           adaptiveStrategy,
                                           projectionStrategy)
        algo.run()
        self.result_ = algo.getResult()
        output_dimension = self.result_.getMetaModel().getOutputDimension()

        # sensitivity
        si = ot.FunctionalChaosSobolIndices(self.result_)
        if output_dimension == 1:
            self.feature_importances_ = [
                si.getSobolIndex(i) for i in range(input_dimension)
            ]
        else:
            self.feature_importances_ = [[0.0] * input_dimension
                                         for _ in range(output_dimension)]
            for k in range(output_dimension):
                for i in range(input_dimension):
                    self.feature_importances_[k][i] = si.getSobolIndex(i, k)
        self.feature_importances_ = np.array(self.feature_importances_)
        return self
Example #15
# Input distribution
distribution = ot.ComposedDistribution([ot.Normal()] * inputDimension)

# Correlated input distribution
S = ot.CorrelationMatrix(inputDimension)
S[1, 0] = 0.3
R = ot.NormalCopula().GetCorrelationFromSpearmanCorrelation(S)
myCopula = ot.NormalCopula(R)
myCorrelatedInputDistribution = ot.ComposedDistribution(
    [ot.Normal()] * inputDimension, myCopula)

sample = myCorrelatedInputDistribution.getSample(2000)

# Orthogonal basis
enumerateFunction = ot.LinearEnumerateFunction(inputDimension)
productBasis = ot.OrthogonalProductPolynomialFactory(
    [ot.HermiteFactory()] * inputDimension, enumerateFunction)
# Adaptive strategy
adaptiveStrategy = ot.FixedStrategy(
    productBasis, enumerateFunction.getStrataCumulatedCardinal(4))
# Projection strategy
samplingSize = 250
projectionStrategy = ot.LeastSquaresStrategy(
    ot.MonteCarloExperiment(samplingSize))

# Polynomial chaos algorithm
algo = ot.FunctionalChaosAlgorithm(
    model, distribution, adaptiveStrategy, projectionStrategy)
algo.run()

# Post-process the results
result = ot.FunctionalChaosResult(algo.getResult())
Example #16
    def _compute_coefficients_legendre(self,
                                       sample_paths,
                                       legendre_quadrature_order=None):

        dimension = self._lower_bound.size
        truncation_order = self._truncation_order
        if legendre_quadrature_order is None:
            legendre_quadrature_order = self._legendre_quadrature_order
        elif type(legendre_quadrature_order) is not int \
                or legendre_quadrature_order <= 0:
            raise ValueError('legendre_quadrature_order must be a positive ' +
                             'integer.')
        n_sample_paths = len(sample_paths)

        # Gauss-Legendre quadrature nodes and weights
        polyColl = ot.PolynomialFamilyCollection([ot.LegendreFactory()] *
                                                 dimension)
        polynoms = ot.OrthogonalProductPolynomialFactory(polyColl)
        U, W = polynoms.getNodesAndWeights(
            ot.Indices([legendre_quadrature_order] * dimension))
        W = np.ravel(W)
        U = np.array(U)
        scale = (self._upper_bound - self._lower_bound) / 2.
        shift = (self._upper_bound + self._lower_bound) / 2.
        X = scale * U + shift

        # Compute coefficients
        try:
            available_memory = int(.9 * get_available_memory())
        except Exception:
            if self.verbose:
                print('WRN: Available memory estimation failed! '
                      'Assuming 1Gb is available (first guess).')
            available_memory = 1024**3
        max_size = int(available_memory / 8 / truncation_order /
                       n_sample_paths)
        batch_size = min(W.size, max_size)
        if self.verbose and batch_size < W.size:
            print('RAM: %d Mb available' % (available_memory / 1024**2))
            print('RAM: %d allocable terms / %d total terms' %
                  (max_size, W.size))
            print('RAM: %d loops required' % np.ceil(float(W.size) / max_size))
        while True:
            coefficients = np.zeros((n_sample_paths, truncation_order))
            try:
                n_done = 0
                while n_done < W.size:
                    sample_paths_values = np.vstack([
                        np.ravel(sample_paths[i](X[n_done:(n_done +
                                                           batch_size)]))
                        for i in range(n_sample_paths)
                    ])
                    mean_values = np.ravel(
                        self._mean(X[n_done:(n_done +
                                             batch_size)]))[np.newaxis, :]
                    centered_sample_paths_values = \
                        sample_paths_values - mean_values
                    del sample_paths_values, mean_values
                    eigenelements_values = np.vstack([
                        self._eigenfunctions[k](
                            X[n_done:(n_done + batch_size)]) /
                        np.sqrt(self._eigenvalues[k])
                        for k in range(truncation_order)
                    ])
                    coefficients += np.sum(
                        W[np.newaxis, np.newaxis, n_done:(n_done + batch_size)]
                        * centered_sample_paths_values[:, np.newaxis, :] *
                        eigenelements_values[np.newaxis, :, :],
                        axis=-1)
                    del centered_sample_paths_values, eigenelements_values
                    n_done += batch_size
                break
            except MemoryError:
                batch_size //= 2  # integer halving keeps slice indices valid
        coefficients *= np.prod(self._upper_bound - self._lower_bound)

        return coefficients
Example #17
    def _legendre_galerkin_scheme(self,
                                  legendre_galerkin_order=10,
                                  legendre_quadrature_order=None):

        # Input checks
        if legendre_galerkin_order <= 0:
            raise ValueError('legendre_galerkin_order must be a positive ' +
                             'integer!')

        if legendre_quadrature_order is not None:
            if legendre_quadrature_order <= 0:
                raise ValueError('legendre_quadrature_order must be a ' +
                                 'positive integer!')

        # Settings
        dimension = self._lower_bound.size
        truncation_order = self._truncation_order
        galerkin_size = ot.EnumerateFunction(
            dimension).getStrataCumulatedCardinal(legendre_galerkin_order)
        if legendre_quadrature_order is None:
            legendre_quadrature_order = 2 * legendre_galerkin_order + 1

        # Check if the current settings are compatible
        if truncation_order > galerkin_size:
            raise ValueError(
                'The truncation order must be less than or ' +
                'equal to the size of the functional basis in the chosen ' +
                'Legendre Galerkin scheme. Current size of the galerkin basis '
                + 'only allows to get %d terms in the KL expansion.' %
                galerkin_size)

        # Construction of the Galerkin basis: tensorized Legendre polynomials
        tensorized_legendre_polynomial_factory = \
            ot.PolynomialFamilyCollection([ot.LegendreFactory()] * dimension)
        tensorized_legendre_polynomial_factory = \
            ot.OrthogonalProductPolynomialFactory(
                tensorized_legendre_polynomial_factory)
        tensorized_legendre_polynomials = \
            [tensorized_legendre_polynomial_factory.build(i)
             for i in range(galerkin_size)]

        # Compute matrix C coefficients using Gauss-Legendre quadrature
        polyColl = ot.PolynomialFamilyCollection([ot.LegendreFactory()] *
                                                 dimension * 2)
        polynoms = ot.OrthogonalProductPolynomialFactory(polyColl)
        U, W = polynoms.getNodesAndWeights(
            ot.Indices([legendre_quadrature_order] * dimension * 2))
        W = np.ravel(W)
        scale = (self._upper_bound - self._lower_bound) / 2.
        shift = (self._upper_bound + self._lower_bound) / 2.
        U = np.array(U)
        X = np.repeat(scale, 2) * U + np.repeat(shift, 2)

        if self.verbose:
            print('Computing matrix C...')

        try:
            available_memory = int(.9 * get_available_memory())
        except Exception:
            if self.verbose:
                print('WRN: Available memory estimation failed! '
                      'Assuming 1Gb is available (first guess).')
            available_memory = 1024**3
        max_size = int(available_memory / 8 / galerkin_size**2)
        batch_size = min(W.size, max_size)
        if self.verbose and batch_size < W.size:
            print('RAM: %d Mb available' % (available_memory / 1024**2))
            print('RAM: %d allocable terms / %d total terms' %
                  (max_size, W.size))
            print('RAM: %d loops required' % np.ceil(float(W.size) / max_size))
        while True:
            C = np.zeros((galerkin_size, galerkin_size))
            try:
                n_done = 0
                while n_done < W.size:
                    covariance_at_X = self._covariance(X[n_done:(n_done +
                                                                 batch_size)])
                    H1 = np.vstack([
                        np.ravel(tensorized_legendre_polynomials[i](
                            U[n_done:(n_done + batch_size), :dimension]))
                        for i in range(galerkin_size)
                    ])
                    H2 = np.vstack([
                        np.ravel(tensorized_legendre_polynomials[i](
                            U[n_done:(n_done + batch_size), dimension:]))
                        for i in range(galerkin_size)
                    ])
                    C += np.sum(W[np.newaxis, np.newaxis,
                                  n_done:(n_done + batch_size)] *
                                covariance_at_X[np.newaxis, np.newaxis, :] *
                                H1[np.newaxis, :, :] * H2[:, np.newaxis, :],
                                axis=-1)
                    del covariance_at_X, H1, H2
                    n_done += batch_size
                break
            except MemoryError:
                batch_size //= 2  # integer halving keeps slice indices valid
        C *= np.prod(self._upper_bound - self._lower_bound)**2.

        # Matrix B is orthonormal up to some constant
        B = np.diag(
            np.repeat(np.prod(self._upper_bound - self._lower_bound),
                      galerkin_size))

        # Solve the generalized eigenvalue problem C D = L B D in L, D
        if self.verbose:
            print('Solving generalized eigenvalue problem...')
        eigenvalues, eigenvectors = linalg.eigh(C, b=B, lower=True)
        eigenvalues, eigenvectors = eigenvalues.real, eigenvectors.real

        # Sort the eigensolutions in the descending order of eigenvalues
        order = eigenvalues.argsort()[::-1]
        eigenvalues = eigenvalues[order]
        eigenvectors = eigenvectors[:, order]

        # Truncate the expansion
        eigenvalues = eigenvalues[:truncation_order]
        eigenvectors = eigenvectors[:, :truncation_order]

        # Eliminate insignificant negative eigenvalues
        if eigenvalues.min() <= 0.:
            if eigenvalues.min() < -.01 * eigenvalues.max():
                raise Exception(
                    'The smallest significant eigenvalue seems ' +
                    'to be negative... Check the positive definiteness of the '
                    + 'covariance function.')
            else:
                truncation_order = np.nonzero(eigenvalues <= 0)[0][0]
                eigenvalues = eigenvalues[:truncation_order]
                eigenvectors = eigenvectors[:, :truncation_order]
                self._truncation_order = truncation_order
                print('WRN: truncation_order was too large.')
                print('It has been reset to: %d' % truncation_order)

        # Define eigenfunctions
        class LegendrePolynomialsBasedEigenFunction():
            def __init__(self, vector):
                self._vector = vector

            def __call__(self, x):
                x = np.asanyarray(x)
                if x.ndim <= 1:
                    x = np.atleast_2d(x).T
                u = (x - shift) / scale
                return np.sum([
                    np.ravel(tensorized_legendre_polynomials[i](u)) *
                    self._vector[i] for i in range(truncation_order)
                ],
                              axis=0)

        # Set attributes
        self._eigenvalues = eigenvalues
        self._eigenfunctions = [
            LegendrePolynomialsBasedEigenFunction(vector)
            for vector in eigenvectors.T
        ]
        self._legendre_galerkin_order = legendre_galerkin_order
        self._legendre_quadrature_order = legendre_quadrature_order
Example #18
                                temperatureProfile, space_filling)
# Retrieve optimal design
input_database = algo.generate()

result = algo.getResult()

print('initial design computed')

# Response of the model
print('sampling size = ', N)
output_database = ishigami_model(input_database)

# Learning input/output
# Usual chaos meta model
enumerate_function = ot.HyperbolicAnisotropicEnumerateFunction(dimension)
orthogonal_basis = ot.OrthogonalProductPolynomialFactory(
    dimension * [ot.LegendreFactory()], enumerate_function)
basis_size = 100
# Initial chaos algorithm
adaptive_strategy = ot.FixedStrategy(orthogonal_basis, basis_size)
# ProjectionStrategy ==> Sparse
fitting_algorithm = ot.KFold()
approximation_algorithm = ot.LeastSquaresMetaModelSelectionFactory(
    ot.LARS(), fitting_algorithm)
projection_strategy = ot.LeastSquaresStrategy(input_database, output_database,
                                              approximation_algorithm)
print('Surrogate model...')
distribution_ishigami = ot.ComposedDistribution(dimension *
                                                [ot.Uniform(-pi, pi)])
algo_pc = ot.FunctionalChaosAlgorithm(input_database, output_database,
                                      distribution_ishigami, adaptive_strategy,
                                      projection_strategy)
Example #19
F = FUNC(mesh, result)
model = ot.Function(F)

dim = model.getInputDimension()
print("dim=", dim)
size = dim + 1
distribution = ot.ComposedDistribution([ot.Normal()] * dim)
weightedExperiment = ot.MonteCarloExperiment(distribution, size)
inSample, weights = weightedExperiment.generateWithWeights()
print("Sample model")
t0 = time()
outSample = model(inSample)
t1 = time()
print("t=", t1 - t0, "s, speed=", inSample.getSize() / (t1 - t0), "evals/s")

basis = ot.OrthogonalProductPolynomialFactory([ot.HermiteFactory()] * dim)
adaptive = ot.FixedStrategy(basis, dim + 1)
projection = ot.LeastSquaresStrategy(weightedExperiment)
algo = ot.FunctionalChaosAlgorithm(inSample, outSample, distribution, adaptive,
                                   projection)
algo.run()
vector = ot.FunctionalChaosRandomVector(algo.getResult())

# Field of Sobol indices
#for i in range(dim):
for i in range(15, 16):
    print("i=", i)
    sobol = [
        vector.getSobolIndex(i, j) for j in range(mesh.getVerticesNumber())
    ]
    field = ot.Field(mesh, [[x] for x in sobol])
Example #20
# %%
# This factory corresponds to the `Uniform` distribution in the [-1,1] interval. 

# %%
univariateFactory.getMeasure()

# %%
# This interval does not match the interval on which the input marginals are defined (we will come back to this later), but it still yields a consistent trend for the kriging.

# %%
polyColl = [univariateFactory]*dimension

# %%
enumerateFunction = ot.LinearEnumerateFunction(dimension)
productBasis = ot.OrthogonalProductPolynomialFactory(polyColl, enumerateFunction)

# %%
functions = []
numberOfTrendCoefficients = 12
for i in range(numberOfTrendCoefficients):
    multivariatepolynomial = productBasis.build(i)
    print(multivariatepolynomial)
    functions.append(multivariatepolynomial)

# %%
basis = ot.Basis(functions)

# %%
# Create the metamodel
# --------------------
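
The example stops at the section title. Here is a minimal sketch of the step that would plausibly follow, assuming a training sample `X`, `Y` already exists; the squared-exponential covariance model is an assumption, only the 12-term trend basis comes from the code above:

# assumption: X, Y form a training sample of matching dimension
covarianceModel = ot.SquaredExponential([1.0] * dimension, [1.0])
algo = ot.KrigingAlgorithm(X, Y, covarianceModel, basis)
algo.run()
krigingMetamodel = algo.getResult().getMetaModel()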
Example #21
    #     f = (x+0.75)**2-0.75**2
    # else:
    #     f = 2.0-x**2
    xarray = np.array(X, copy=False)
    return np.piecewise(xarray, [xarray < 0, xarray >= 0],
                        [lambda x: x * (x + 1.5), lambda x: 2.0 - x * x])


f = ot.PythonFunction(1, 1, func_sample=piecewise)

# %%
# Build a metamodel over each segment
degree = 5
samplingSize = 100
enumerateFunction = ot.LinearEnumerateFunction(dimension)
productBasis = ot.OrthogonalProductPolynomialFactory(
    [ot.LegendreFactory()] * dimension, enumerateFunction)
adaptiveStrategy = ot.FixedStrategy(
    productBasis, enumerateFunction.getStrataCumulatedCardinal(degree))
projectionStrategy = ot.LeastSquaresStrategy(
    ot.MonteCarloExperiment(samplingSize))

# %%
# Segment 1: (-1.0; 0.0)
d1 = ot.Uniform(-1.0, 0.0)
fc1 = ot.FunctionalChaosAlgorithm(f, d1, adaptiveStrategy, projectionStrategy)
fc1.run()
mm1 = fc1.getResult().getMetaModel()
graph = mm1.draw(-1.0, -1e-6)
view = viewer.View(graph)

# %%
Example #22
def alsace(func,
           N,
           jpdf,
           tol=1e-22,
           sample_type='R',
           limit_cond=5,
           max_fcalls=1000,
           seed=123,
           ed_file=None,
           ed_fevals_file=None,
           verbose=True,
           pce_dict={}):
    """
    ALSACE - Approximations via Lower-Set and Least-Squares-based Adaptive Chaos Expansions

    func: function to be approximated.
    N: number of parameters.
    jpdf: joint probability density function.
    limit_cond: maximum allowed trace of inv(D.T*D/n), used as a stability criterion
    sample_type: 'R'-random, 'L'-LHS
    seed: sampling seed
    tol, max_fcalls: exit criteria, self-explanatory.
    ed_file, ed_fevals_file: experimental design and corresponding evaluations

    'act': activated, i.e. already part of the approximation.
    'adm': admissible, i.e. candidates for the approximation's expansion.
    """

    if not pce_dict:  # if pce_dict is empty --> cold-start
        idx_act = []
        idx_act.append([0] * N)  # start with 0 multi-index
        idx_adm = []
        # set seed
        ot.RandomGenerator.SetSeed(seed)
        ed_size = 2 * N  # initial number of samples
        # initial experimental design and corresponding evaluations
        ed, ed_fevals = get_ed(func,
                               jpdf,
                               ed_size,
                               sample_type=sample_type,
                               knots=[],
                               values=[],
                               ed_file=ed_file,
                               ed_fevals_file=ed_fevals_file)
        global_error_indicator = 1.0  # give arbitrary sufficiently large value

        # get the distribution type of each random variable
        dist_types = []
        for i in range(N):
            dist_type = jpdf.getMarginal(i).getName()
            dist_types.append(dist_type)

        # create orthogonal univariate bases
        poly_collection = ot.PolynomialFamilyCollection(N)
        for i in range(N):
            pdf = jpdf.getDistributionCollection()[i]
            algo = ot.AdaptiveStieltjesAlgorithm(pdf)
            poly_collection[i] = ot.StandardDistributionPolynomialFactory(algo)

        # create multivariate basis
        mv_basis = ot.OrthogonalProductPolynomialFactory(
            poly_collection, ot.EnumerateFunction(N))
        # get enumerate function (multi-index handling)
        enum_func = mv_basis.getEnumerateFunction()

    else:  # get data from dictionary
        idx_act = pce_dict['idx_act']
        idx_adm = pce_dict['idx_adm']
        pce_coeff_act = pce_dict['pce_coeff_act']
        pce_coeff_adm = pce_dict['pce_coeff_adm']
        ed = pce_dict['ed']
        ed_fevals = pce_dict['ed_fevals']
        ed_size = len(ed_fevals)
        # compute local and global error indicators
        global_error_indicator = np.sum(np.array(pce_coeff_adm)**2)
        enum_func = pce_dict['enum_func']
        mv_basis = pce_dict['mv_basis']

    #
    while ed_size < max_fcalls and global_error_indicator > tol:
        # the index added last to the activated set is the one to be refined
        last_act_idx = idx_act[-1][:]
        # get admissible neighbors of the lastly added index
        adm_neighbors = admissible_neighbors(last_act_idx, idx_act)
        # update admissible indices
        idx_adm = idx_adm + adm_neighbors
        # get polynomial basis for the LS problem
        idx_ls = idx_act + idx_adm
        idx_ls_single = transform_multi_index_set(idx_ls, enum_func)
        ls_basis = mv_basis.getSubBasis(idx_ls_single)
        ls_basis_size = len(ls_basis)

        # construct the design matrix D and compute its QR decomposition
        D = get_design_matrix(ls_basis, ed)
        Q, R = sp.qr(D, mode='economic')
        # construct information matrix A= D^T*D
        A = np.matmul(D.T, D) / ed_size
        trAinv_test = np.sum(1. / np.linalg.eig(A)[0])
        trAinv = np.trace(np.linalg.inv(A))
        print('new trace ', trAinv_test)
        print('old trace ', trAinv)

        # If tr(inv(A)) becomes too large, enrich the ED until it becomes
        # acceptable or until ed_size reaches max_fcalls
        while (trAinv > limit_cond
               and ed_size < max_fcalls) or ed_size < ls_basis_size:
            # inform user
            if verbose:
                print('WARNING: tr(inv(A)) = ', trAinv)
                print('WARNING: cond(D) = ', np.linalg.cond(D))
                print("")
            # select new size for the ED
            if ls_basis_size > ed_size:
                ed_size = ls_basis_size + N
            elif ed_size + N > max_fcalls:
                ed_size = max_fcalls
            else:
                ed_size = ed_size + N
            # expand ED
            ed, ed_fevals = get_ed(func,
                                   jpdf,
                                   ed_size,
                                   sample_type=sample_type,
                                   knots=ed,
                                   values=ed_fevals,
                                   ed_file=ed_file,
                                   ed_fevals_file=ed_fevals_file)
            # construct the design matrix D and compute its QR decomposition
            D = get_design_matrix(ls_basis, ed)
            Q, R = sp.qr(D, mode='economic')
            # construct information matrix A= D^T*D
            A = np.matmul(D.T, D) / ed_size
            trAinv = np.trace(np.linalg.inv(A))

        # solve LS problem
        c = Q.T.dot(ed_fevals)
        pce_coeff_ls = sp.solve_triangular(R, c)

        # find the multi-index with the largest contribution, add it to idx_act
        # and delete it from idx_adm
        pce_coeff_act = pce_coeff_ls[:len(idx_act)].tolist()
        pce_coeff_adm = pce_coeff_ls[-len(idx_adm):].tolist()
        help_idx = np.argmax(np.abs(pce_coeff_adm))
        idx_add = idx_adm.pop(help_idx)
        pce_coeff_add = pce_coeff_adm.pop(help_idx)
        idx_act.append(idx_add)
        pce_coeff_act.append(pce_coeff_add)

    # store expansion data in dictionary
    pce_dict = {}
    pce_dict['idx_act'] = idx_act
    pce_dict['idx_adm'] = idx_adm
    pce_dict['pce_coeff_act'] = pce_coeff_act
    pce_dict['pce_coeff_adm'] = pce_coeff_adm
    pce_dict['ed'] = ed
    pce_dict['ed_fevals'] = ed_fevals
    pce_dict['enum_func'] = enum_func
    pce_dict['mv_basis'] = mv_basis

    return pce_dict
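
A hypothetical call sketch for `alsace`; the model `meroND` is an assumption, and the helpers used inside (`get_ed`, `admissible_neighbors`, `transform_multi_index_set`, `get_design_matrix`) must be importable from the same package:

import openturns as ot

N = 3
jpdf = ot.ComposedDistribution([ot.Uniform(-1.0, 1.0)] * N)
pce_dict = alsace(func=meroND, N=N, jpdf=jpdf, max_fcalls=500)
# warm restart with a larger budget, reusing the stored expansion data
pce_dict = alsace(func=meroND, N=N, jpdf=jpdf, max_fcalls=1000,
                  pce_dict=pce_dict)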
Example #23
import openturns as ot
from math import log

Sample = ot.NumericalSample.ImportFromTextFile("DATA_test2.csv", ",")
# Sample.setDescription(["BIO","SAL","pH","K","Na","Zn"])
print(Sample)

X = Sample[:, 1:6]
Y = Sample[:, 0]

################################################################################################
# Build a model BIO~SAL+pH+K+Na+Zn
dim = X.getDimension()
enumerateFunction = ot.EnumerateFunction(dim)
factory = ot.OrthogonalProductPolynomialFactory([ot.MonomialFactory()]*dim, enumerateFunction)

# Build 'interactions' as a list of list [a1,a2,a3,a4,a5], and we will generate tensorized
# polynomials SAL^a1*pH^a2*K^a3*Na^a4*Zn^a5.

# BIO~SAL+pH+K+Na+Zn
interactions = []
interactions.append([0]*dim)
for i in range(dim):
  indices = [0]*dim
  indices[i] = 1
  # Y ~ I(Xi)^1
  interactions.append(indices[:])

basis = ot.Basis([factory.build(enumerateFunction.inverse(indices)) for indices in interactions])
################################################################################################
# Create a distribution (?) from the collection
myDistribution = ot.ComposedDistribution(myCollection)
# ???
vectX = ot.RandomVector(myDistribution)

########################
### Polynomial Chaos ###
########################

polyColl = ot.PolynomialFamilyCollection(dim)
for i in range(dim):
	polyColl[i] = ot.HermiteFactory()

enumerateFunction = ot.LinearEnumerateFunction(dim)
multivariateBasis = ot.OrthogonalProductPolynomialFactory(polyColl, enumerateFunction)

basisSequenceFactory = ot.LARS()
fittingAlgorithm = ot.CorrectedLeaveOneOut()
approximationAlgorithm = ot.LeastSquaresMetaModelSelectionFactory(basisSequenceFactory, fittingAlgorithm)

# Generate the design of experiments
N = 200
ot.RandomGenerator.SetSeed(77)
Liste_test = ot.LHSExperiment(myDistribution, N)
InputSample = Liste_test.generate()


# Write the input data file
fidResult=open('IT'+IT0+'/IT'+IT0+'_TIRAGE_200.txt',"a")
for i in range(N):
Example #25
# Create the input independent joint distribution
distribution = ot.Normal(2)
distribution.setDescription(['X1', 'X2'])

# %%
# Create the correlated input distribution
S = ot.CorrelationMatrix(2)
S[1, 0] = 0.3
R = ot.NormalCopula.GetCorrelationFromSpearmanCorrelation(S)
copula = ot.NormalCopula(R)
distribution_corr = ot.ComposedDistribution([ot.Normal()] * 2, copula)

# %%
# ANCOVA needs a functional decomposition of the model
enumerateFunction = ot.LinearEnumerateFunction(2)
productBasis = ot.OrthogonalProductPolynomialFactory([ot.HermiteFactory()] * 2,
                                                     enumerateFunction)
adaptiveStrategy = ot.FixedStrategy(
    productBasis, enumerateFunction.getStrataCumulatedCardinal(4))
samplingSize = 250
projectionStrategy = ot.LeastSquaresStrategy(
    ot.MonteCarloExperiment(samplingSize))
algo = ot.FunctionalChaosAlgorithm(model, distribution, adaptiveStrategy,
                                   projectionStrategy)
algo.run()
result = ot.FunctionalChaosResult(algo.getResult())

# %%
# Create the input sample taking account the correlation
size = 2000
sample = distribution_corr.getSample(size)
Example #26
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

# Polynomial factories
factoryCollection = [
    ot.LaguerreFactory(2.5),
    ot.LegendreFactory(),
    ot.HermiteFactory()
]
dim = len(factoryCollection)
basisFactory = ot.OrthogonalProductPolynomialFactory(factoryCollection)
basis = ot.OrthogonalBasis(basisFactory)
print('basis=', basis)
x = [0.5] * dim
for i in range(10):
    f = basis.build(i)
    print('i=', i, 'f(X)=', f(x))

# Using multi-indices
enum = basis.getEnumerateFunction()
for i in range(10):
    indices = enum(i)
    f = basis.build(indices)
    print('indices=', indices, 'f(X)=', f(x))

# Other factories
factoryCollection = [
    ot.OrthogonalUniVariatePolynomialFunctionFactory(ot.LaguerreFactory(2.5)),
    ot.HaarWaveletFactory(),
Example #27
# We also have access to the input variable names with
input_names = im.distributionX.getDescription()

# %%
# Create a training sample

# %%
N = 100
inputTrain = im.distributionX.getSample(N)
outputTrain = im.model(inputTrain)

# %%
# Create the chaos.

# %%
multivariateBasis = ot.OrthogonalProductPolynomialFactory(
    [im.X1, im.X2, im.X3])
selectionAlgorithm = ot.LeastSquaresMetaModelSelectionFactory()
projectionStrategy = ot.LeastSquaresStrategy(inputTrain, outputTrain,
                                             selectionAlgorithm)
totalDegree = 8
enumfunc = multivariateBasis.getEnumerateFunction()
P = enumfunc.getStrataCumulatedCardinal(totalDegree)
adaptiveStrategy = ot.FixedStrategy(multivariateBasis, P)
chaosalgo = ot.FunctionalChaosAlgorithm(inputTrain, outputTrain,
                                        im.distributionX, adaptiveStrategy,
                                        projectionStrategy)

# %%
chaosalgo.run()
result = chaosalgo.getResult()
metamodel = result.getMetaModel()
Example #28
dim_input = cb.dim  # dimension of the input
dim_output = 1  # dimension of the output

# %%
# We load the model :
g = cb.model

# %%
# Create a polynomial chaos decomposition
# ---------------------------------------

# %%
# We create the multivariate polynomial basis by tensorization of the univariate polynomials and the default linear enumerate rule.

# %%
multivariateBasis = ot.OrthogonalProductPolynomialFactory(
    [dist_E, dist_F, dist_L, dist_I])

# %%
# Generate a training sample of size N with MC simulation.

# %%
N = 50  # size of the experimental design
inputTrain = myDistribution.getSample(N)
outputTrain = g(inputTrain)

# %%
# We select the `FixedStrategy` truncation rule, which corresponds to using the first `P` polynomials of the polynomial basis. In this case, we select `P` with the `getStrataCumulatedCardinal` method, so that all polynomials of total degree less than or equal to 5 are used.

# %%
totalDegree = 5
enumfunc = multivariateBasis.getEnumerateFunction()
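
The listing truncates here. Mirroring the pattern of the sibling examples in this collection, the selection of `P` and the remaining chaos construction would plausibly read:

# plausible continuation, following the pattern of the other examples
P = enumfunc.getStrataCumulatedCardinal(totalDegree)
adaptiveStrategy = ot.FixedStrategy(multivariateBasis, P)
projectionStrategy = ot.LeastSquaresStrategy(
    inputTrain, outputTrain, ot.LeastSquaresMetaModelSelectionFactory())
chaosalgo = ot.FunctionalChaosAlgorithm(inputTrain, outputTrain,
                                        myDistribution, adaptiveStrategy,
                                        projectionStrategy)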
Example #29
metaModel = ot.PointToFieldConnection(postProcessing,
                                      algo.getResult().getMetaModel())

graph = validationOutputSample.drawMarginal(0)
graph.setColors(['red'])
graph2 = metaModel(validationInputSample).drawMarginal(0)
graph2.setColors(['blue'])
graph.add(graph2)
graph.setTitle('Model/metamodel comparison')
graph.setXTitle(r'$t$')
graph.setYTitle(r'$z$')
otv.View(graph)

# Second, using a more evolved interface
basis = ot.OrthogonalProductPolynomialFactory([
    ot.StandardDistributionPolynomialFactory(distX.getMarginal(i))
    for i in range(distX.getDimension())
])
adaptiveStrategy = ot.FixedStrategy(
    basis,
    ot.EnumerateFunction(distX.getDimension()).getStrataCumulatedCardinal(6))
projectionStrategy = ot.LeastSquaresStrategy(
    ot.LeastSquaresMetaModelSelectionFactory(ot.LARS(),
                                             ot.CorrectedLeaveOneOut()))
algo = ot.FunctionalChaosAlgorithm(inputSample, outputSampleChaos, distX,
                                   adaptiveStrategy, projectionStrategy)
algo.run()
metaModel = ot.PointToFieldConnection(postProcessing,
                                      algo.getResult().getMetaModel())

graph = validationOutputSample.drawMarginal(0)
graph.setColors(['red'])