Example #1
    def __init__(self, beta_coeff, idx_set, jpdf):
        self.beta_coeff = beta_coeff
        self.idx_set = idx_set
        self.jpdf = jpdf
        self.N = jpdf.getDimension()

        # get the distribution type of each random variable
        dist_types = []
        for i in range(self.N):
            dist_type = self.jpdf.getMarginal(i).getName()
            dist_types.append(dist_type)

        # create orthogonal univariate bases
        poly_collection = ot.PolynomialFamilyCollection(self.N)
        for i in range(self.N):
            pdf = jpdf.getDistributionCollection()[i]
            algo = ot.AdaptiveStieltjesAlgorithm(pdf)
            poly_collection[i] = ot.StandardDistributionPolynomialFactory(algo)

        # create multivariate basis
        multivariate_basis = ot.OrthogonalProductPolynomialFactory(
            poly_collection, ot.EnumerateFunction(self.N))
        # get enumerate function (multi-index handling)
        enum_func = multivariate_basis.getEnumerateFunction()
        # get expansion
        self.expansion = multivariate_basis.getSubBasis(
            transform_multi_index_set(idx_set, enum_func))
        # create openturns surrogate model
        sur_model = ot.FunctionCollection()
        for i in range(len(self.expansion)):
            multi = str(beta_coeff[i]) + '*x'
            help_function = ot.SymbolicFunction(['x'], [multi])
            sur_model.add(ot.ComposedFunction(help_function,
                                              self.expansion[i]))
        self.surrogate_model = np.sum(sur_model)
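The constructor leaves self.surrogate_model as a single OpenTURNS function, so the finished surrogate can be evaluated like any ot.Function. A minimal usage sketch, assuming the enclosing class is named Surrogate (the name is not shown in the snippet) and that transform_multi_index_set is importable from the same module:

import openturns as ot

# hypothetical setup: two uniform inputs and a three-term expansion
jpdf = ot.ComposedDistribution([ot.Uniform(-1.0, 1.0)] * 2)
idx_set = [[0, 0], [1, 0], [0, 1]]    # multi-indices of the retained terms
beta_coeff = [1.0, 0.5, -0.25]        # one PCE coefficient per multi-index

surrogate = Surrogate(beta_coeff, idx_set, jpdf)  # class name is an assumption
print(surrogate.surrogate_model([0.3, -0.7]))     # evaluates the expansion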
Example #2
    def _buildChaosAlgo(self, inputSample, outputSample):
        """
        Build the functional chaos algorithm without running it.
        """
        if self._distribution is None:
            # create default distribution : Uniform between min and max of the
            # input sample
            inputSample = ot.NumericalSample(inputSample)
            inputMin = inputSample.getMin()
            inputMin[0] = np.min(self._defectSizes)
            inputMax = inputSample.getMax()
            inputMax[0] = np.max(self._defectSizes)
            marginals = [
                ot.Uniform(inputMin[i], inputMax[i]) for i in range(self._dim)
            ]
            self._distribution = ot.ComposedDistribution(marginals)

        # put the description of the inputSample into the description of the distribution
        self._distribution.setDescription(inputSample.getDescription())

        if self._adaptiveStrategy is None:
            # Create the adaptive strategy: default is a fixed strategy with a
            # linear enumerate function, truncated at degree self._degree
            polyCol = [0.] * self._dim
            for i in range(self._dim):
                polyCol[i] = ot.StandardDistributionPolynomialFactory(
                    self._distribution.getMarginal(i))

            enumerateFunction = ot.EnumerateFunction(self._dim)
            multivariateBasis = ot.OrthogonalProductPolynomialFactory(
                polyCol, enumerateFunction)
            # the default degree is set in __init__
            indexMax = enumerateFunction.getStrataCumulatedCardinal(
                self._degree)
            self._adaptiveStrategy = ot.FixedStrategy(multivariateBasis,
                                                      indexMax)

        if self._projectionStrategy is None:
            # sparse polynomial chaos
            basis_sequence_factory = ot.LAR()
            fitting_algorithm = ot.KFold()
            approximation_algorithm = ot.LeastSquaresMetaModelSelectionFactory(
                basis_sequence_factory, fitting_algorithm)
            self._projectionStrategy = ot.LeastSquaresStrategy(
                inputSample, outputSample, approximation_algorithm)

        return ot.FunctionalChaosAlgorithm(
            inputSample, outputSample, self._distribution,
            self._adaptiveStrategy, self._projectionStrategy)
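The returned algorithm still has to be run by the caller; the typical downstream calls, using the standard OpenTURNS API (the surrounding class instance is assumed):

algo = self._buildChaosAlgo(inputSample, outputSample)
algo.run()
result = algo.getResult()            # ot.FunctionalChaosResult
metamodel = result.getMetaModel()    # callable polynomial chaos surrogate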
Example #3
def dali_pce(func,
             N,
             jpdf_cp,
             jpdf_ot,
             tol=1e-12,
             max_fcalls=1000,
             verbose=True,
             interp_dict=None):

    if not interp_dict:  # no previous expansion data --> cold-start
        idx_act = []  # M_activated x N
        idx_adm = []  # M_admissible x N
        fevals_act = []  # M_activated x 1
        fevals_adm = []  # M_admissible x 1
        coeff_act = []  # M_activated x 1
        coeff_adm = []  #  M_admissible x 1

        # start with 0 multi-index
        knot0 = []
        for n in range(N):
            # get knots per dimension based on maximum index
            kk, ww = seq_lj_1d(order=0, dist=jpdf_cp[n])
            knot0.append(kk[0])
        feval = func(knot0)

        # update activated sets
        idx_act.append([0] * N)
        coeff_act.append(feval)
        fevals_act.append(feval)

        # local error indicators
        local_error_indicators = np.abs(coeff_act)

        # get the OT distribution type of each random variable
        dist_types = []
        for i in range(N):
            dist_type = jpdf_ot.getMarginal(i).getName()
            dist_types.append(dist_type)

        # create orthogonal univariate bases
        poly_collection = ot.PolynomialFamilyCollection(N)
        for i in range(N):
            if dist_types[i] == 'Uniform':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.LegendreFactory())
            elif dist_types[i] == 'Normal':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.HermiteFactory())
            elif dist_types[i] == 'Beta':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.JacobiFactory())
            elif dist_types[i] == 'Gamma':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.LaguerreFactory())
            else:
                pdf = jpdf_ot.getDistributionCollection()[i]
                algo = ot.AdaptiveStieltjesAlgorithm(pdf)
                poly_collection[i] = ot.StandardDistributionPolynomialFactory(
                    algo)

        # create multivariate basis
        mv_basis = ot.OrthogonalProductPolynomialFactory(
            poly_collection, ot.EnumerateFunction(N))
        # get enumerate function (multi-index handling)
        enum_func = mv_basis.getEnumerateFunction()

    else:
        idx_act = interp_dict['idx_act']
        idx_adm = interp_dict['idx_adm']
        coeff_act = interp_dict['coeff_act']
        coeff_adm = interp_dict['coeff_adm']
        fevals_act = interp_dict['fevals_act']
        fevals_adm = interp_dict['fevals_adm']
        mv_basis = interp_dict['mv_basis']
        enum_func = interp_dict['enum_func']
        # local error indicators
        local_error_indicators = np.abs(coeff_adm)

    # compute global error indicator
    global_error_indicator = local_error_indicators.sum()  # max or sum

    # number of function calls so far, which equals the number of
    # approximation terms M
    fcalls = len(idx_act) + len(idx_adm)

    # maximum index per dimension
    max_idx_per_dim = np.max(idx_act + idx_adm, axis=0)

    # univariate knots and polynomials per dimension
    knots_per_dim = {}
    for n in range(N):
        kk, ww = seq_lj_1d(order=max_idx_per_dim[n], dist=jpdf_cp[n])
        knots_per_dim[n] = kk

    # start iterations
    while global_error_indicator > tol and fcalls < max_fcalls:
        if verbose:
            print('function calls:', fcalls)
            print('global error indicator:', global_error_indicator)

        # the index added last to the activated set is the one to be refined
        last_act_idx = idx_act[-1][:]
        # compute the knot corresponding to the lastly added index
        last_knot = [
            knots_per_dim[n][i] for n, i in zip(range(N), last_act_idx)
        ]
        # get admissible neighbors of the lastly added index
        adm_neighbors = admissible_neighbors(last_act_idx, idx_act)

        for an in adm_neighbors:
            # update admissible index set
            idx_adm.append(an)
            # find which parameter/direction n (n=1,2,...,N) gets refined
            n_ref = np.argmin(
                [idx1 == idx2 for idx1, idx2 in zip(an, last_act_idx)])
            # sequence of 1d Leja nodes/weights for the given refinement
            knots_n, weights_n = seq_lj_1d(an[n_ref], jpdf_cp[int(n_ref)])

            # update max_idx_per_dim, knots_per_dim, if necessary
            if an[n_ref] > max_idx_per_dim[n_ref]:
                max_idx_per_dim[n_ref] = an[n_ref]
                knots_per_dim[n_ref] = knots_n

            # find new_knot and compute function on new_knot
            new_knot = last_knot[:]
            new_knot[n_ref] = knots_n[-1]
            feval = func(new_knot)
            fevals_adm.append(feval)
            fcalls += 1  # update function calls

        # create PCE basis
        idx_system = idx_act + idx_adm
        idx_system_single = transform_multi_index_set(idx_system, enum_func)
        system_basis = mv_basis.getSubBasis(idx_system_single)
        # get corresponding evaluations
        fevals_system = fevals_act + fevals_adm
        # multi-dimensional knots
        M = len(idx_system)  # number of expansion terms / equations
        knots_md = [[knots_per_dim[n][idx_system[m][n]] for m in range(M)]
                    for n in range(N)]
        knots_md = np.array(knots_md).T
        # design matrix
        D = get_design_matrix(system_basis, knots_md)
        # solve the system of equations via QR decomposition
        Q, R = scl.qr(D, mode='economic')
        c = Q.T.dot(fevals_system)
        coeff_system = scl.solve_triangular(R, c)

        # find the multi-index with the largest contribution, add it to idx_act
        # and delete it from idx_adm
        coeff_act = coeff_system[:len(idx_act)].tolist()
        coeff_adm = coeff_system[-len(idx_adm):].tolist()
        help_idx = np.argmax(np.abs(coeff_adm))
        idx_add = idx_adm.pop(help_idx)
        pce_coeff_add = coeff_adm.pop(help_idx)
        fevals_add = fevals_adm.pop(help_idx)
        idx_act.append(idx_add)
        coeff_act.append(pce_coeff_add)
        fevals_act.append(fevals_add)
        # the coefficients of the admissible multi-indices are re-computed
        # in the next iteration

        # local error indicators
        local_error_indicators = np.abs(coeff_adm)
        # compute global error indicator
        global_error_indicator = local_error_indicators.sum()  # max or sum

    # store expansion data in dictionary
    interp_dict = {}
    interp_dict['idx_act'] = idx_act
    interp_dict['idx_adm'] = idx_adm
    interp_dict['coeff_act'] = coeff_act
    interp_dict['coeff_adm'] = coeff_adm
    interp_dict['fevals_act'] = fevals_act
    interp_dict['fevals_adm'] = fevals_adm
    interp_dict['enum_func'] = enum_func
    interp_dict['mv_basis'] = mv_basis
    return interp_dict
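Because dali_pce both consumes and returns interp_dict, the adaptive loop can be resumed with a larger budget. A sketch, assuming a callable model, chaospy marginals jpdf_cp, and an OpenTURNS joint distribution jpdf_ot as the signature expects:

interp_dict = dali_pce(model, N, jpdf_cp, jpdf_ot, max_fcalls=200)
# warm-start: resume the refinement from the stored expansion data
interp_dict = dali_pce(model, N, jpdf_cp, jpdf_ot, max_fcalls=500,
                       interp_dict=interp_dict)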
Example #4
def alsace(func,
           N,
           jpdf,
           tol=1e-22,
           sample_type='R',
           limit_cond=5,
           max_fcalls=1000,
           seed=123,
           ed_file=None,
           ed_fevals_file=None,
           verbose=True,
           pce_dict=None):
    """
    ALSACE - Approximations via Lower-Set and Least-Squares-based Adaptive Chaos Expansions

    func: function to be approximated.
    N: number of parameters.
    jpdf: joint probability density function.
    limit_cond: upper bound on tr(inv(D.T*D/n)), used as a stability
    criterion for the LS problem
    sample_type: 'R'-random, 'L'-LHS
    seed: sampling seed
    tol, max_fcalls: exit criteria, self-explanatory.
    ed_file, ed_fevals_file: experimental design and corresponding evaluations

    'act': activated, i.e. already part of the approximation.
    'adm': admissible, i.e. candidates for the approximation's expansion.
    """

    if not pce_dict:  # no previous expansion data --> cold-start
        idx_act = []
        idx_act.append([0] * N)  # start with 0 multi-index
        idx_adm = []
        # set seed
        ot.RandomGenerator.SetSeed(seed)
        ed_size = 2 * N  # initial number of samples
        # initial experimental design and corresponding evaluations
        ed, ed_fevals = get_ed(func,
                               jpdf,
                               ed_size,
                               sample_type=sample_type,
                               knots=[],
                               values=[],
                               ed_file=ed_file,
                               ed_fevals_file=ed_fevals_file)
        global_error_indicator = 1.0  # arbitrary, sufficiently large initial value

        # get the distribution type of each random variable
        dist_types = []
        for i in range(N):
            dist_type = jpdf.getMarginal(i).getName()
            dist_types.append(dist_type)

        # create orthogonal univariate bases
        poly_collection = ot.PolynomialFamilyCollection(N)
        for i in range(N):
            pdf = jpdf.getDistributionCollection()[i]
            algo = ot.AdaptiveStieltjesAlgorithm(pdf)
            poly_collection[i] = ot.StandardDistributionPolynomialFactory(algo)

        # create multivariate basis
        mv_basis = ot.OrthogonalProductPolynomialFactory(
            poly_collection, ot.EnumerateFunction(N))
        # get enumerate function (multi-index handling)
        enum_func = mv_basis.getEnumerateFunction()

    else:  # get data from dictionary
        idx_act = pce_dict['idx_act']
        idx_adm = pce_dict['idx_adm']
        pce_coeff_act = pce_dict['pce_coeff_act']
        pce_coeff_adm = pce_dict['pce_coeff_adm']
        ed = pce_dict['ed']
        ed_fevals = pce_dict['ed_fevals']
        ed_size = len(ed_fevals)
        # compute local and global error indicators
        global_error_indicator = np.sum(np.array(pce_coeff_adm)**2)
        enum_func = pce_dict['enum_func']
        mv_basis = pce_dict['mv_basis']

    # adaptive loop: refine until the budget is exhausted or the tolerance is met
    while ed_size < max_fcalls and global_error_indicator > tol:
        # the index added last to the activated set is the one to be refined
        last_act_idx = idx_act[-1][:]
        # get admissible neighbors of the lastly added index
        adm_neighbors = admissible_neighbors(last_act_idx, idx_act)
        # update admissible indices
        idx_adm = idx_adm + adm_neighbors
        # get polynomial basis for the LS problem
        idx_ls = idx_act + idx_adm
        idx_ls_single = transform_multi_index_set(idx_ls, enum_func)
        ls_basis = mv_basis.getSubBasis(idx_ls_single)
        ls_basis_size = len(ls_basis)

        # construct the design matrix D and compute its QR decomposition
        D = get_design_matrix(ls_basis, ed)
        Q, R = sp.qr(D, mode='economic')
        # construct the normalized information matrix A = D^T * D / n
        A = np.matmul(D.T, D) / ed_size
        # tr(inv(A)), computed via the eigenvalues and via the explicit inverse
        trAinv_test = np.sum(1. / np.linalg.eigvals(A))
        trAinv = np.trace(np.linalg.inv(A))
        if verbose:
            print('tr(inv(A)) via eigenvalues:', trAinv_test)
            print('tr(inv(A)) via inverse:', trAinv)

        # If tr(inv(A)) becomes too large, enrich the ED until it becomes
        # acceptable or until ed_size reaches max_fcalls
        while (trAinv > limit_cond
               and ed_size < max_fcalls) or ed_size < ls_basis_size:
            # inform user
            if verbose:
                print('WARNING: tr(inv(A)) = ', trAinv)
                print('WARNING: cond(D) = ', np.linalg.cond(D))
                print("")
            # select new size for the ED
            if ls_basis_size > ed_size:
                ed_size = ls_basis_size + N
            elif ed_size + N > max_fcalls:
                ed_size = max_fcalls
            else:
                ed_size = ed_size + N
            # expand ED
            ed, ed_fevals = get_ed(func,
                                   jpdf,
                                   ed_size,
                                   sample_type=sample_type,
                                   knots=ed,
                                   values=ed_fevals,
                                   ed_file=ed_file,
                                   ed_fevals_file=ed_fevals_file)
            # construct the design matrix D and compute its QR decomposition
            D = get_design_matrix(ls_basis, ed)
            Q, R = sp.qr(D, mode='economic')
            # construct the normalized information matrix A = D^T * D / n
            A = np.matmul(D.T, D) / ed_size
            trAinv = np.trace(np.linalg.inv(A))

        # solve LS problem
        c = Q.T.dot(ed_fevals)
        pce_coeff_ls = sp.solve_triangular(R, c)

        # find the multi-index with the largest contribution, add it to idx_act
        # and delete it from idx_adm
        pce_coeff_act = pce_coeff_ls[:len(idx_act)].tolist()
        pce_coeff_adm = pce_coeff_ls[-len(idx_adm):].tolist()
        help_idx = np.argmax(np.abs(pce_coeff_adm))
        idx_add = idx_adm.pop(help_idx)
        pce_coeff_add = pce_coeff_adm.pop(help_idx)
        idx_act.append(idx_add)
        pce_coeff_act.append(pce_coeff_add)

    # store expansion data in dictionary
    pce_dict = {}
    pce_dict['idx_act'] = idx_act
    pce_dict['idx_adm'] = idx_adm
    pce_dict['pce_coeff_act'] = pce_coeff_act
    pce_dict['pce_coeff_adm'] = pce_coeff_adm
    pce_dict['ed'] = ed
    pce_dict['ed_fevals'] = ed_fevals
    pce_dict['enum_func'] = enum_func
    pce_dict['mv_basis'] = mv_basis

    return pce_dict
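alsace follows the same cold-/warm-start convention through pce_dict and can persist the experimental design via ed_file/ed_fevals_file. A sketch under the same assumptions (a callable model, an OpenTURNS joint distribution jpdf; the file names are placeholders):

pce_dict = alsace(model, N, jpdf, max_fcalls=300,
                  ed_file='ed.txt', ed_fevals_file='ed_fevals.txt')
# continue the same expansion with a doubled budget
pce_dict = alsace(model, N, jpdf, max_fcalls=600,
                  ed_file='ed.txt', ed_fevals_file='ed_fevals.txt',
                  pce_dict=pce_dict)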
Example #5
    def __init__(self, strategy, degree, distributions, N_quad=None, sample=None,
                 stieltjes=True, sparse_param={}):
        """Generate truncature and projection strategies.

        Allong with the strategies the sample is storred as an attribute.
        :attr:`sample` as well as corresponding weights: :attr:`weights`.

        :param str strategy: Least square or Quadrature ['LS', 'Quad', 'SparseLS'].
        :param int degree: Polynomial degree.
        :param  distributions: Distributions of each input parameter.
        :type distributions: lst(:class:`openturns.Distribution`)
        :param array_like sample: Samples for least square
          (n_samples, n_features).
        :param bool stieltjes: Whether to use the Stieltjes algorithm for the basis.
        :param dict sparse_param: Parameters for the Sparse Cleaning Truncation
          Strategy and/or hyperbolic truncation of the initial basis.

            - **max_considered_terms** (int) -- maximum number of considered terms,
            - **most_significant** (int) -- most significant number of terms to retain,
            - **significance_factor** (float) -- significance factor,
            - **hyper_factor** (float) -- factor for the hyperbolic truncation
              strategy.
        """
        # distributions
        self.in_dim = len(distributions)
        self.dist = ot.ComposedDistribution(distributions)
        self.sparse_param = sparse_param

        if 'hyper_factor' in self.sparse_param:
            enumerateFunction = ot.EnumerateFunction(self.in_dim, self.sparse_param['hyper_factor'])
        else:
            enumerateFunction = ot.EnumerateFunction(self.in_dim)

        if stieltjes:
            # Tends to cause performance issues
            self.basis = ot.OrthogonalProductPolynomialFactory(
                [ot.StandardDistributionPolynomialFactory(
                    ot.AdaptiveStieltjesAlgorithm(marginal))
                 for marginal in distributions], enumerateFunction)
        else:
            self.basis = ot.OrthogonalProductPolynomialFactory(
                [ot.StandardDistributionPolynomialFactory(margin)
                 for margin in distributions], enumerateFunction)

        self.n_basis = enumerateFunction.getStrataCumulatedCardinal(degree)

        # Strategy choice for expansion coefficient determination
        self.strategy = strategy
        if self.strategy == 'LS' or self.strategy == 'SparseLS':  # least-squares method
            self.sample = sample
        else:  # integration method
            # redefinition of sample size
            # n_samples = (degree + 1) ** self.in_dim
            # marginal degree definition
            # by default: the marginal degree for each input random
            # variable is set to the total polynomial degree 'degree'+1
            measure = self.basis.getMeasure()

            if N_quad is not None:
                degrees = [int(N_quad ** 0.25)] * self.in_dim
            else:
                degrees = [degree + 1] * self.in_dim

            self.proj_strategy = ot.IntegrationStrategy(
                ot.GaussProductExperiment(measure, degrees))
            self.sample, self.weights = self.proj_strategy.getExperiment().generateWithWeights()

            if not stieltjes:
                transformation = ot.Function(ot.MarginalTransformationEvaluation(
                    [measure.getMarginal(i) for i in range(self.in_dim)],
                    distributions, False))
                self.sample = transformation(self.sample)

        self.pc = None
        self.pc_result = None
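A minimal instantiation sketch for this constructor, assuming the enclosing class is named PC (the name is not shown in the snippet):

import openturns as ot

dists = [ot.Uniform(0.0, 1.0), ot.Normal(0.0, 1.0)]
# quadrature-based projection with total degree 3
pc = PC(strategy='Quad', degree=3, distributions=dists)  # class name assumed
print(pc.n_basis)       # basis size kept by the fixed strategy
print(len(pc.sample))   # Gauss-product quadrature nodes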
Example #6
#! /usr/bin/env python

import openturns as ot
from math import log

Sample = ot.NumericalSample.ImportFromTextFile("DATA_test2.csv", ",")
# Sample.setDescription(["BIO","SAL","pH","K","Na","Zn"])
print(Sample)

X = Sample[:, 1:6]
Y = Sample[:, 0]

################################################################################################
# Build a model BIO~SAL+pH+K+Na+Zn
dim = X.getDimension()
enumerateFunction = ot.EnumerateFunction(dim)
factory = ot.OrthogonalProductPolynomialFactory([ot.MonomialFactory()]*dim, enumerateFunction)

# Build 'interactions' as a list of lists [a1,a2,a3,a4,a5]; each entry yields
# the tensorized polynomial SAL^a1*pH^a2*K^a3*Na^a4*Zn^a5.

# BIO~SAL+pH+K+Na+Zn
interactions = []
interactions.append([0]*dim)
for i in range(dim):
    indices = [0]*dim
    indices[i] = 1
    # Y ~ I(Xi)^1
    interactions.append(indices[:])

basis = ot.Basis([factory.build(enumerateFunction.inverse(indices)) for indices in interactions])
Example #7
    # Get input & output sample
    lhs = ot.LHSExperiment(distribution, samplingSize)
    inputSample = lhs.generate()
    outputSample = model(inputSample)

    # Validation of results on independent samples
    validationSize = 10
    inputValidation = distribution.getSample(validationSize)
    outputValidation = model(inputValidation)

    # 1) SPC algorithm
    # Create the orthogonal basis
    polynomialCollection = [ot.LegendreFactory()] * dimension

    enumerateFunction = ot.EnumerateFunction(dimension)
    productBasis = ot.OrthogonalProductPolynomialFactory(
        polynomialCollection, enumerateFunction)

    # Create the adaptive strategy
    degree = 8
    basisSize = enumerateFunction.getStrataCumulatedCardinal(degree)
    adaptiveStrategy = ot.FixedStrategy(productBasis, basisSize)

    # Select the fitting algorithm
    fittingAlgorithm = ot.KFold()
    leastSquaresFactory = ot.LeastSquaresMetaModelSelectionFactory(
        ot.LARS(), fittingAlgorithm)

    # Projection strategy
    projectionStrategy = ot.LeastSquaresStrategy(inputSample, outputSample,
                                                 leastSquaresFactory)
Example #8
# create orthogonal univariate bases
poly_collection = ot.PolynomialFamilyCollection(N)
for i in range(N):
    if dist_types[i] == 'Uniform':
        poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
            ot.LegendreFactory())
    elif dist_types[i] == 'Normal':
        poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
            ot.HermiteFactory())
    elif dist_types[i] == 'Beta':
        poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
            ot.JacobiFactory())
    elif dist_types[i] == 'Gamma':
        poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
            ot.LaguerreFactory())
    else:
        pdf = jpdf_ot.getDistributionCollection()[i]
        algo = ot.AdaptiveStieltjesAlgorithm(pdf)
        poly_collection[i] = ot.StandardDistributionPolynomialFactory(algo)

# create multivariate basis
mv_basis = ot.OrthogonalProductPolynomialFactory(poly_collection,
                                                 ot.EnumerateFunction(N))
# get enumerate function (multi-index handling)
enum_func = mv_basis.getEnumerateFunction()

max_fcalls = np.linspace(100, 1000, 10).tolist()
meanz = []
varz = []
cv_errz_rms = []
cv_errz_max = []
fcallz = []
# cross validation sample
np.random.seed(42)
nsamples = 1000
cv_samples_in = jpdf.sample(nsamples).T
cv_samples_out = [meroND(sample) for sample in cv_samples_in]
for mfc in max_fcalls:
Example #9
    def _legendre_galerkin_scheme(self,
                                  legendre_galerkin_order=10,
                                  legendre_quadrature_order=None):

        # Input checks
        if legendre_galerkin_order <= 0:
            raise ValueError('legendre_galerkin_order must be a positive ' +
                             'integer!')

        if legendre_quadrature_order is not None:
            if legendre_quadrature_order <= 0:
                raise ValueError('legendre_quadrature_order must be a ' +
                                 'positive integer!')

        # Settings
        dimension = self._lower_bound.size
        truncation_order = self._truncation_order
        galerkin_size = ot.EnumerateFunction(
            dimension).getStrataCumulatedCardinal(legendre_galerkin_order)
        if legendre_quadrature_order is None:
            legendre_quadrature_order = 2 * legendre_galerkin_order + 1

        # Check if the current settings are compatible
        if truncation_order > galerkin_size:
            raise ValueError(
                'The truncation order must be less than or ' +
                'equal to the size of the functional basis in the chosen ' +
                'Legendre Galerkin scheme. Current size of the galerkin basis '
                + 'only allows to get %d terms in the KL expansion.' %
                galerkin_size)

        # Construction of the Galerkin basis: tensorized Legendre polynomials
        tensorized_legendre_polynomial_factory = \
            ot.PolynomialFamilyCollection([ot.LegendreFactory()] * dimension)
        tensorized_legendre_polynomial_factory = \
            ot.OrthogonalProductPolynomialFactory(
                tensorized_legendre_polynomial_factory)
        tensorized_legendre_polynomials = \
            [tensorized_legendre_polynomial_factory.build(i)
             for i in range(galerkin_size)]

        # Compute matrix C coefficients using Gauss-Legendre quadrature
        polyColl = ot.PolynomialFamilyCollection([ot.LegendreFactory()] *
                                                 dimension * 2)
        polynoms = ot.OrthogonalProductPolynomialFactory(polyColl)
        U, W = polynoms.getNodesAndWeights(
            ot.Indices([legendre_quadrature_order] * dimension * 2))
        W = np.ravel(W)
        scale = (self._upper_bound - self._lower_bound) / 2.
        shift = (self._upper_bound + self._lower_bound) / 2.
        U = np.array(U)
        X = np.repeat(scale, 2) * U + np.repeat(shift, 2)

        if self.verbose:
            print('Computing matrix C...')

        try:
            available_memory = int(.9 * get_available_memory())
        except Exception:
            if self.verbose:
                print('WRN: Available memory estimation failed! '
                      'Assuming 1Gb is available (first guess).')
            available_memory = 1024**3
        max_size = int(available_memory / 8 / galerkin_size**2)
        batch_size = min(W.size, max_size)
        if self.verbose and batch_size < W.size:
            print('RAM: %d Mb available' % (available_memory / 1024**2))
            print('RAM: %d allocable terms / %d total terms' %
                  (max_size, W.size))
            print('RAM: %d loops required' % np.ceil(float(W.size) / max_size))
        while True:
            C = np.zeros((galerkin_size, galerkin_size))
            try:
                n_done = 0
                while n_done < W.size:
                    covariance_at_X = self._covariance(X[n_done:(n_done +
                                                                 batch_size)])
                    H1 = np.vstack([
                        np.ravel(tensorized_legendre_polynomials[i](
                            U[n_done:(n_done + batch_size), :dimension]))
                        for i in range(galerkin_size)
                    ])
                    H2 = np.vstack([
                        np.ravel(tensorized_legendre_polynomials[i](
                            U[n_done:(n_done + batch_size), dimension:]))
                        for i in range(galerkin_size)
                    ])
                    C += np.sum(W[np.newaxis, np.newaxis,
                                  n_done:(n_done + batch_size)] *
                                covariance_at_X[np.newaxis, np.newaxis, :] *
                                H1[np.newaxis, :, :] * H2[:, np.newaxis, :],
                                axis=-1)
                    del covariance_at_X, H1, H2
                    n_done += batch_size
                break
            except MemoryError:
                batch_size = max(1, batch_size // 2)
        C *= np.prod(self._upper_bound - self._lower_bound)**2.

        # Matrix B is orthonormal up to some constant
        B = np.diag(
            np.repeat(np.prod(self._upper_bound - self._lower_bound),
                      galerkin_size))

        # Solve the generalized eigenvalue problem C D = L B D in L, D
        if self.verbose:
            print('Solving generalized eigenvalue problem...')
        eigenvalues, eigenvectors = linalg.eigh(C, b=B, lower=True)
        eigenvalues, eigenvectors = eigenvalues.real, eigenvectors.real

        # Sort the eigensolutions in the descending order of eigenvalues
        order = eigenvalues.argsort()[::-1]
        eigenvalues = eigenvalues[order]
        eigenvectors = eigenvectors[:, order]

        # Truncate the expansion
        eigenvalues = eigenvalues[:truncation_order]
        eigenvectors = eigenvectors[:, :truncation_order]

        # Eliminate insignificant negative eigenvalues
        if eigenvalues.min() <= 0.:
            # raise if the most negative eigenvalue is significant in magnitude
            if eigenvalues.min() < -.01 * eigenvalues.max():
                raise Exception(
                    'The smallest significant eigenvalue seems ' +
                    'to be negative... Check the positive definiteness of the '
                    + 'covariance function.')
            else:
                truncation_order = np.nonzero(eigenvalues <= 0)[0][0]
                eigenvalues = eigenvalues[:truncation_order]
                eigenvectors = eigenvectors[:, :truncation_order]
                self._truncation_order = truncation_order
                print('WRN: truncation_order was too large.')
                print('It has been reset to: %d' % truncation_order)

        # Define eigenfunctions
        class LegendrePolynomialsBasedEigenFunction():
            def __init__(self, vector):
                self._vector = vector

            def __call__(self, x):
                x = np.asanyarray(x)
                if x.ndim <= 1:
                    x = np.atleast_2d(x).T
                u = (x - shift) / scale
                return np.sum([
                    np.ravel(tensorized_legendre_polynomials[i](u)) *
                    self._vector[i] for i in range(truncation_order)
                ],
                              axis=0)

        # Set attributes
        self._eigenvalues = eigenvalues
        self._eigenfunctions = [
            LegendrePolynomialsBasedEigenFunction(vector)
            for vector in eigenvectors.T
        ]
        self._legendre_galerkin_order = legendre_galerkin_order
        self._legendre_quadrature_order = legendre_quadrature_order
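Once the scheme has run, the truncated Karhunen-Loeve pair is stored on the instance; a short access sketch (kl is an assumed instance of the enclosing class, here over a 2-D domain):

kl._legendre_galerkin_scheme(legendre_galerkin_order=10)
print(kl._eigenvalues)          # truncated eigenvalues, descending order
phi0 = kl._eigenfunctions[0]    # Legendre-polynomial-based eigenfunction
print(phi0([0.5, 0.5]))         # evaluate it at a point of the 2-D domain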
Example #10
graph2 = metaModel(validationInputSample).drawMarginal(0)
graph2.setColors(['blue'])
graph.add(graph2)
graph.setTitle('Model/metamodel comparison')
graph.setXTitle(r'$t$')
graph.setYTitle(r'$z$')
otv.View(graph)

# Second, using a more evolved interface
basis = ot.OrthogonalProductPolynomialFactory([
    ot.StandardDistributionPolynomialFactory(distX.getMarginal(i))
    for i in range(distX.getDimension())
])
adaptiveStrategy = ot.FixedStrategy(
    basis,
    ot.EnumerateFunction(distX.getDimension()).getStrataCumulatedCardinal(6))
projectionStrategy = ot.LeastSquaresStrategy(
    ot.LeastSquaresMetaModelSelectionFactory(ot.LARS(),
                                             ot.CorrectedLeaveOneOut()))
algo = ot.FunctionalChaosAlgorithm(inputSample, outputSampleChaos, distX,
                                   adaptiveStrategy, projectionStrategy)
algo.run()
metaModel = ot.PointToFieldConnection(postProcessing,
                                      algo.getResult().getMetaModel())

graph = validationOutputSample.drawMarginal(0)
graph.setColors(['red'])
graph2 = metaModel(validationInputSample).drawMarginal(0)
graph2.setColors(['blue'])
graph.add(graph2)
graph.setTitle('Model/metamodel comparison')
Example #11
# Input distribution
distribution = ot.ComposedDistribution([ot.Normal()] * inputDimension)

# Correlated input distribution
S = ot.CorrelationMatrix(inputDimension)
S[1, 0] = 0.3
R = ot.NormalCopula().GetCorrelationFromSpearmanCorrelation(S)
myCopula = ot.NormalCopula(R)
myCorrelatedInputDistribution = ot.ComposedDistribution(
    [ot.Normal()] * inputDimension, myCopula)

sample = myCorrelatedInputDistribution.getSample(2000)

# Orthogonal basis
enumerateFunction = ot.EnumerateFunction(inputDimension)
productBasis = ot.OrthogonalProductPolynomialFactory(
    [ot.HermiteFactory()] * inputDimension, enumerateFunction)
# Adaptive strategy
adaptiveStrategy = ot.FixedStrategy(
    productBasis, enumerateFunction.getStrataCumulatedCardinal(4))
# Projection strategy
samplingSize = 250
projectionStrategy = ot.LeastSquaresStrategy(
    ot.MonteCarloExperiment(samplingSize))

# Polynomial chaos algorithm
algo = ot.FunctionalChaosAlgorithm(model, distribution, adaptiveStrategy,
                                   projectionStrategy)
algo.run()
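After run(), the result object exposes both the coefficients and the metamodel; a short follow-up sketch using standard OpenTURNS calls:

result = algo.getResult()
metaModel = result.getMetaModel()
print(result.getCoefficients())   # chaos coefficients of the expansion
print(metaModel(sample[0]))       # evaluate the surrogate at one input point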