Example #1
    def __init__(self, beta_coeff, idx_set, jpdf):
        self.beta_coeff = beta_coeff
        self.idx_set = idx_set
        self.jpdf = jpdf
        self.N = jpdf.getDimension()

        # get the distribution type of each random variable
        dist_types = []
        for i in range(self.N):
            dist_type = self.jpdf.getMarginal(i).getName()
            dist_types.append(dist_type)

        # create orthogonal univariate bases
        poly_collection = ot.PolynomialFamilyCollection(self.N)
        for i in range(self.N):
            pdf = jpdf.getDistributionCollection()[i]
            algo = ot.AdaptiveStieltjesAlgorithm(pdf)
            poly_collection[i] = ot.StandardDistributionPolynomialFactory(algo)

        # create multivariate basis
        multivariate_basis = ot.OrthogonalProductPolynomialFactory(
            poly_collection, ot.EnumerateFunction(self.N))
        # get enumerate function (multi-index handling)
        enum_func = multivariate_basis.getEnumerateFunction()
        # get expansion
        self.expansion = multivariate_basis.getSubBasis(
            transform_multi_index_set(idx_set, enum_func))
        # create openturns surrogate model
        sur_model = ot.FunctionCollection()
        for i in range(len(self.expansion)):
            multi = str(beta_coeff[i]) + '*x'
            help_function = ot.SymbolicFunction(['x'], [multi])
            sur_model.add(ot.ComposedFunction(help_function,
                                              self.expansion[i]))
        self.surrogate_model = np.sum(sur_model)
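The snippet above relies on an enumerate function to translate between multi-indices and linear ranks; this is presumably what the helper transform_multi_index_set does entry by entry. A minimal sketch of that mapping, using only OpenTURNS:

import openturns as ot

# rank <-> multi-index mapping used for sub-basis extraction
enum_func = ot.EnumerateFunction(2)   # 2 random variables
print(enum_func(4))                   # rank 4 -> multi-index [1, 1]
print(enum_func.inverse([1, 1]))      # multi-index [1, 1] -> rank 4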
Example #2
def dali_pce(func,
             N,
             jpdf_cp,
             jpdf_ot,
             tol=1e-12,
             max_fcalls=1000,
             verbose=True,
             interp_dict=None):

    if not interp_dict:  # empty or None dictionary --> cold-start
        idx_act = []  # M_activated x N
        idx_adm = []  # M_admissible x N
        fevals_act = []  # M_activated x 1
        fevals_adm = []  # M_admissible x 1
        coeff_act = []  # M_activated x 1
        coeff_adm = []  # M_admissible x 1

        # start with 0 multi-index
        knot0 = []
        for n in range(N):
            # get knots per dimension based on maximum index
            kk, ww = seq_lj_1d(order=0, dist=jpdf_cp[n])
            knot0.append(kk[0])
        feval = func(knot0)

        # update activated sets
        idx_act.append([0] * N)
        coeff_act.append(feval)
        fevals_act.append(feval)

        # local error indicators
        local_error_indicators = np.abs(coeff_act)

        # get the OT distribution type of each random variable
        dist_types = []
        for i in range(N):
            dist_type = jpdf_ot.getMarginal(i).getName()
            dist_types.append(dist_type)

        # create orthogonal univariate bases
        poly_collection = ot.PolynomialFamilyCollection(N)
        for i in range(N):
            if dist_types[i] == 'Uniform':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.LegendreFactory())
            elif dist_types[i] == 'Normal':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.HermiteFactory())
            elif dist_types[i] == 'Beta':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.JacobiFactory())
            elif dist_types[i] == 'Gamma':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.LaguerreFactory())
            else:
                pdf = jpdf_ot.getDistributionCollection()[i]
                algo = ot.AdaptiveStieltjesAlgorithm(pdf)
                poly_collection[i] = ot.StandardDistributionPolynomialFactory(
                    algo)

        # create multivariate basis
        mv_basis = ot.OrthogonalProductPolynomialFactory(
            poly_collection, ot.EnumerateFunction(N))
        # get enumerate function (multi-index handling)
        enum_func = mv_basis.getEnumerateFunction()

    else:
        idx_act = interp_dict['idx_act']
        idx_adm = interp_dict['idx_adm']
        coeff_act = interp_dict['coeff_act']
        coeff_adm = interp_dict['coeff_adm']
        fevals_act = interp_dict['fevals_act']
        fevals_adm = interp_dict['fevals_adm']
        mv_basis = interp_dict['mv_basis']
        enum_func = interp_dict['enum_func']
        # local error indicators
        local_error_indicators = np.abs(coeff_adm)

    # compute global error indicator
    global_error_indicator = local_error_indicators.sum()  # max or sum

    # fcalls / M approx. terms up to now
    fcalls = len(idx_act) + len(idx_adm)  # fcalls = M --> approx. terms

    # maximum index per dimension
    max_idx_per_dim = np.max(idx_act + idx_adm, axis=0)

    # univariate knots and polynomials per dimension
    knots_per_dim = {}
    for n in range(N):
        kk, ww = seq_lj_1d(order=max_idx_per_dim[n], dist=jpdf_cp[n])
        knots_per_dim[n] = kk

    # start iterations
    while global_error_indicator > tol and fcalls < max_fcalls:
        if verbose:
            print('fcalls:', fcalls)
            print('global error indicator:', global_error_indicator)

        # the index added last to the activated set is the one to be refined
        last_act_idx = idx_act[-1][:]
        # compute the knot corresponding to the lastly added index
        last_knot = [
            knots_per_dim[n][i] for n, i in zip(range(N), last_act_idx)
        ]
        # get admissible neighbors of the lastly added index
        adm_neighbors = admissible_neighbors(last_act_idx, idx_act)

        for an in adm_neighbors:
            # update admissible index set
            idx_adm.append(an)
            # find which parameter/direction n (n=1,2,...,N) gets refined
            n_ref = np.argmin(
                [idx1 == idx2 for idx1, idx2 in zip(an, last_act_idx)])
            # sequence of 1d Leja nodes/weights for the given refinement
            knots_n, weights_n = seq_lj_1d(an[n_ref], jpdf_cp[int(n_ref)])

            # update max_idx_per_dim, knots_per_dim, if necessary
            if an[n_ref] > max_idx_per_dim[n_ref]:
                max_idx_per_dim[n_ref] = an[n_ref]
                knots_per_dim[n_ref] = knots_n

            # find new_knot and compute function on new_knot
            new_knot = last_knot[:]
            new_knot[n_ref] = knots_n[-1]
            feval = func(new_knot)
            fevals_adm.append(feval)
            fcalls += 1  # update function calls

        # create PCE basis
        idx_system = idx_act + idx_adm
        idx_system_single = transform_multi_index_set(idx_system, enum_func)
        system_basis = mv_basis.getSubBasis(idx_system_single)
        # get corresponding evaluations
        fevals_system = fevals_act + fevals_adm
        # multi-dimensional knots
        M = len(idx_system)  # number of expansion terms = number of equations
        knots_md = [[knots_per_dim[n][idx_system[m][n]] for m in range(M)]
                    for n in range(N)]
        knots_md = np.array(knots_md).T
        # design matrix
        D = get_design_matrix(system_basis, knots_md)
        # solve system of equations
        Q, R = scl.qr(D, mode='economic')
        c = Q.T.dot(fevals_system)
        coeff_system = scl.solve_triangular(R, c)

        # find the multi-index with the largest contribution, add it to idx_act
        # and delete it from idx_adm
        coeff_act = coeff_system[:len(idx_act)].tolist()
        coeff_adm = coeff_system[-len(idx_adm):].tolist()
        help_idx = np.argmax(np.abs(coeff_adm))
        idx_add = idx_adm.pop(help_idx)
        pce_coeff_add = coeff_adm.pop(help_idx)
        fevals_add = fevals_adm.pop(help_idx)
        idx_act.append(idx_add)
        coeff_act.append(pce_coeff_add)
        fevals_act.append(fevals_add)

        # local error indicators from the remaining admissible coefficients
        local_error_indicators = np.abs(coeff_adm)
        # compute global error indicator
        global_error_indicator = local_error_indicators.sum()  # max or sum

    # store expansion data in dictionary
    interp_dict = {}
    interp_dict['idx_act'] = idx_act
    interp_dict['idx_adm'] = idx_adm
    interp_dict['coeff_act'] = coeff_act
    interp_dict['coeff_adm'] = coeff_adm
    interp_dict['fevals_act'] = fevals_act
    interp_dict['fevals_adm'] = fevals_adm
    interp_dict['enum_func'] = enum_func
    interp_dict['mv_basis'] = mv_basis
    return interp_dict
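A hedged usage sketch for dali_pce: the signature requires both a chaospy joint distribution (jpdf_cp, consumed by the Leja helper seq_lj_1d) and its OpenTURNS counterpart (jpdf_ot); the toy model below is purely illustrative:

import numpy as np
import chaospy as cp
import openturns as ot

def toy_model(x):  # hypothetical model to approximate
    return np.exp(-x[0]) * np.sin(np.pi * x[1])

N = 2
jpdf_cp = cp.J(cp.Uniform(-1, 1), cp.Uniform(-1, 1))
jpdf_ot = ot.ComposedDistribution([ot.Uniform(-1.0, 1.0)] * N)
interp = dali_pce(toy_model, N, jpdf_cp, jpdf_ot, tol=1e-10,
                  max_fcalls=200, verbose=False)
print(len(interp['idx_act']), 'activated multi-indices')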
Example #3
def alsace(func,
           N,
           jpdf,
           tol=1e-22,
           sample_type='R',
           limit_cond=5,
           max_fcalls=1000,
           seed=123,
           ed_file=None,
           ed_fevals_file=None,
           verbose=True,
           pce_dict=None):
    """
    ALSACE - Approximations via Lower-Set and Least-Squares-based Adaptive Chaos Expansions

    func: function to be approximated.
    N: number of parameters.
    jpdf: joint probability density function.
    limit_cond: maximum allowed tr(inv(A)), A = D.T*D/ed_size (least-squares
      stability criterion).
    sample_type: 'R'-random, 'L'-LHS
    seed: sampling seed
    tol, max_fcalls: exit criteria, self-explanatory.
    ed_file, ed_fevals_file: experimental design and corresponding evaluations

    'act': activated, i.e. already part of the approximation.
    'adm': admissible, i.e. candidates for the approximation's expansion.
    """

    if not pce_dict:  # empty or None dictionary --> cold-start
        idx_act = []
        idx_act.append([0] * N)  # start with 0 multi-index
        idx_adm = []
        # set seed
        ot.RandomGenerator.SetSeed(seed)
        ed_size = 2 * N  # initial number of samples
        # initial experimental design and corresponding evaluations
        ed, ed_fevals = get_ed(func,
                               jpdf,
                               ed_size,
                               sample_type=sample_type,
                               knots=[],
                               values=[],
                               ed_file=ed_file,
                               ed_fevals_file=ed_fevals_file)
        global_error_indicator = 1.0  # arbitrary, sufficiently large initial value

        # get the distribution type of each random variable
        dist_types = []
        for i in range(N):
            dist_type = jpdf.getMarginal(i).getName()
            dist_types.append(dist_type)

        # create orthogonal univariate bases
        poly_collection = ot.PolynomialFamilyCollection(N)
        for i in range(N):
            pdf = jpdf.getDistributionCollection()[i]
            algo = ot.AdaptiveStieltjesAlgorithm(pdf)
            poly_collection[i] = ot.StandardDistributionPolynomialFactory(algo)

        # create multivariate basis
        mv_basis = ot.OrthogonalProductPolynomialFactory(
            poly_collection, ot.EnumerateFunction(N))
        # get enumerate function (multi-index handling)
        enum_func = mv_basis.getEnumerateFunction()

    else:  # get data from dictionary
        idx_act = pce_dict['idx_act']
        idx_adm = pce_dict['idx_adm']
        pce_coeff_act = pce_dict['pce_coeff_act']
        pce_coeff_adm = pce_dict['pce_coeff_adm']
        ed = pce_dict['ed']
        ed_fevals = pce_dict['ed_fevals']
        ed_size = len(ed_fevals)
        # compute local and global error indicators
        global_error_indicator = np.sum(np.array(pce_coeff_adm)**2)
        enum_func = pce_dict['enum_func']
        mv_basis = pce_dict['mv_basis']

    # adaptivity loop: refine until tolerance is met or the budget is exhausted
    while ed_size < max_fcalls and global_error_indicator > tol:
        # the index added last to the activated set is the one to be refined
        last_act_idx = idx_act[-1][:]
        # get admissible neighbors of the lastly added index
        adm_neighbors = admissible_neighbors(last_act_idx, idx_act)
        # update admissible indices
        idx_adm = idx_adm + adm_neighbors
        # get polynomial basis for the LS problem
        idx_ls = idx_act + idx_adm
        idx_ls_single = transform_multi_index_set(idx_ls, enum_func)
        ls_basis = mv_basis.getSubBasis(idx_ls_single)
        ls_basis_size = len(ls_basis)

        # construct the design matrix D and compute its QR decomposition
        D = get_design_matrix(ls_basis, ed)
        Q, R = sp.qr(D, mode='economic')
        # construct the (normalized) information matrix A = D^T*D / ed_size
        A = np.matmul(D.T, D) / ed_size
        trAinv = np.trace(np.linalg.inv(A))

        # If tr(inv(A)) becomes too large, enrich the ED until it becomes
        # acceptable or until ed_size reaches max_fcalls
        while (trAinv > limit_cond
               and ed_size < max_fcalls) or ed_size < ls_basis_size:
            # inform user
            if verbose:
                print('WARNING: tr(inv(A)) = ', trAinv)
                print('WARNING: cond(D) = ', np.linalg.cond(D))
                print("")
            # select new size for the ED
            if ls_basis_size > ed_size:
                ed_size = ls_basis_size + N
            elif ed_size + N > max_fcalls:
                ed_size = max_fcalls
            else:
                ed_size = ed_size + N
            # expand ED
            ed, ed_fevals = get_ed(func,
                                   jpdf,
                                   ed_size,
                                   sample_type=sample_type,
                                   knots=ed,
                                   values=ed_fevals,
                                   ed_file=ed_file,
                                   ed_fevals_file=ed_fevals_file)
            # construct the design matrix D and compute its QR decomposition
            D = get_design_matrix(ls_basis, ed)
            Q, R = sp.qr(D, mode='economic')
            # construct the (normalized) information matrix A = D^T*D / ed_size
            A = np.matmul(D.T, D) / ed_size
            trAinv = np.trace(np.linalg.inv(A))

        # solve LS problem
        c = Q.T.dot(ed_fevals)
        pce_coeff_ls = sp.solve_triangular(R, c)

        # find the multi-index with the largest contribution, add it to idx_act
        # and delete it from idx_adm
        pce_coeff_act = pce_coeff_ls[:len(idx_act)].tolist()
        pce_coeff_adm = pce_coeff_ls[-len(idx_adm):].tolist()
        help_idx = np.argmax(np.abs(pce_coeff_adm))
        idx_add = idx_adm.pop(help_idx)
        pce_coeff_add = pce_coeff_adm.pop(help_idx)
        idx_act.append(idx_add)
        pce_coeff_act.append(pce_coeff_add)

    # store expansion data in dictionary
    pce_dict = {}
    pce_dict['idx_act'] = idx_act
    pce_dict['idx_adm'] = idx_adm
    pce_dict['pce_coeff_act'] = pce_coeff_act
    pce_dict['pce_coeff_adm'] = pce_coeff_adm
    pce_dict['ed'] = ed
    pce_dict['ed_fevals'] = ed_fevals
    pce_dict['enum_func'] = enum_func
    pce_dict['mv_basis'] = mv_basis

    return pce_dict
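A usage sketch for alsace, under the assumption that the helpers it calls (get_ed, admissible_neighbors, transform_multi_index_set, get_design_matrix) are importable; the Ishigami-type model is an illustrative choice:

import numpy as np
import openturns as ot

def ishigami(x):  # illustrative test function
    return np.sin(x[0]) + 7.0 * np.sin(x[1])**2 + 0.1 * x[2]**4 * np.sin(x[0])

N = 3
jpdf = ot.ComposedDistribution([ot.Uniform(-np.pi, np.pi)] * N)
pce = alsace(ishigami, N, jpdf, max_fcalls=500, verbose=False)
# with an orthonormal basis, the coefficient of the zero multi-index is the mean
print('PCE mean estimate:', pce['pce_coeff_act'][0])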
Example #4
    def __init__(self, strategy, degree, distributions, N_quad=None, sample=None,
                 stieltjes=True, sparse_param={}):
        """Generate truncature and projection strategies.

        Along with the strategies, the sample is stored as the attribute
        :attr:`sample`, together with the corresponding weights :attr:`weights`.

        :param str strategy: Least square or Quadrature ['LS', 'Quad', 'SparseLS'].
        :param int degree: Polynomial degree.
        :param  distributions: Distributions of each input parameter.
        :type distributions: lst(:class:`openturns.Distribution`)
        :param array_like sample: Samples for least square
          (n_samples, n_features).
        :param bool stieltjes: Whether to use the Stieltjes algorithm for the basis.
        :param dict sparse_param: Parameters for the Sparse Cleaning Truncation
          Strategy and/or hyperbolic truncation of the initial basis.

            - **max_considered_terms** (int) -- Maximum Considered Terms,
            - **most_significant** (int), Most Significant number to retain,
            - **significance_factor** (float), Significance Factor,
            - **hyper_factor** (float), factor for hyperbolic truncation
              strategy.
        """
        # distributions
        self.in_dim = len(distributions)
        self.dist = ot.ComposedDistribution(distributions)
        self.sparse_param = sparse_param

        if 'hyper_factor' in self.sparse_param:
            enumerateFunction = ot.EnumerateFunction(self.in_dim, self.sparse_param['hyper_factor'])
        else:
            enumerateFunction = ot.EnumerateFunction(self.in_dim)

        if stieltjes:
            # Tends to result in performance issues
            self.basis = ot.OrthogonalProductPolynomialFactory(
                [ot.StandardDistributionPolynomialFactory(
                    ot.AdaptiveStieltjesAlgorithm(marginal))
                 for marginal in distributions], enumerateFunction)
        else:
            self.basis = ot.OrthogonalProductPolynomialFactory(
                [ot.StandardDistributionPolynomialFactory(margin)
                 for margin in distributions], enumerateFunction)

        self.n_basis = enumerateFunction.getStrataCumulatedCardinal(degree)

        # Strategy choice for expansion coefficient determination
        self.strategy = strategy
        if self.strategy == 'LS' or self.strategy == 'SparseLS':  # least-squares method
            self.sample = sample
        else:  # integration method
            # redefinition of sample size
            # n_samples = (degree + 1) ** self.in_dim
            # marginal degree definition
            # by default: the marginal degree for each input random
            # variable is set to the total polynomial degree 'degree'+1
            measure = self.basis.getMeasure()

            if N_quad is not None:
                degrees = [int(N_quad ** 0.25)] * self.in_dim
            else:
                degrees = [degree + 1] * self.in_dim

            self.proj_strategy = ot.IntegrationStrategy(
                ot.GaussProductExperiment(measure, degrees))
            self.sample, self.weights = self.proj_strategy.getExperiment().generateWithWeights()

            if not stieltjes:
                transformation = ot.Function(ot.MarginalTransformationEvaluation(
                    [measure.getMarginal(i) for i in range(self.in_dim)],
                    distributions, False))
                self.sample = transformation(self.sample)

        self.pc = None
        self.pc_result = None
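A minimal instantiation sketch, assuming the __init__ above belongs to a polynomial-chaos wrapper class (named PC here for illustration only):

import openturns as ot

dists = [ot.Uniform(-1.0, 1.0), ot.Normal(0.0, 1.0)]
pc = PC(strategy='Quad', degree=3, distributions=dists)  # hypothetical class name
print(pc.n_basis)            # number of basis terms kept
print(pc.sample.getSize())   # number of Gauss-product quadrature nodes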
Example #5
# 'iMax' and the helper 'clean' (which rounds tiny coefficients) are defined
# in the part of this snippet truncated above
distributionCollection = [
    ot.Student(22.0),
    ot.Triangular(-1.0, 0.3, 1.0),
    ot.Uniform(-1.0, 1.0),
    ot.Uniform(-1.0, 3.0),
    ot.Weibull(1.0, 3.0),
    ot.Beta(1.0, 3.0, -1.0, 1.0),
    ot.Beta(0.5, 1.0, -1.0, 1.0),
    ot.Beta(0.5, 1.0, -2.0, 3.0),
    ot.Gamma(1.0, 3.0),
    ot.Arcsine()
]
for n in range(len(distributionCollection)):
    distribution = distributionCollection[n]
    name = distribution.getClassName()
    polynomialFactory = ot.StandardDistributionPolynomialFactory(
        ot.AdaptiveStieltjesAlgorithm(distribution))
    print("polynomialFactory(", name, "=", polynomialFactory, ")")
    for i in range(iMax):
        print(name, " polynomial(", i, ")=", clean(polynomialFactory.build(i)))
    roots = polynomialFactory.getRoots(iMax - 1)
    print(name, " polynomial(", iMax - 1, ") roots=", roots)
    nodes, weights = polynomialFactory.getNodesAndWeights(iMax - 1)
    print(name, " polynomial(", iMax - 1, ") nodes=", nodes, " and weights=",
          weights)
    M = ot.SymmetricMatrix(iMax)
    for i in range(iMax):
        pI = polynomialFactory.build(i)
        for j in range(i + 1):
            pJ = polynomialFactory.build(j)

            # assumed continuation (the snippet is truncated here): integrand
            # for the Gram-matrix entry <p_i, p_j> w.r.t. the distribution
            def kernel(x):
                return [pI(x[0]) * pJ(x[0]) * distribution.computePDF(x[0])]

            M[i, j] = ot.GaussKronrod().integrate(
                ot.PythonFunction(1, 1, kernel), distribution.getRange())[0]
    # M should be close to the identity matrix if the basis is orthonormal
Example #6
for i in range(N):
    if dist_types[i] == 'Uniform':
        poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
            ot.LegendreFactory())
    elif dist_types[i] == 'Normal':
        poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
            ot.HermiteFactory())
    elif dist_types[i] == 'Beta':
        poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
            ot.JacobiFactory())
    elif dist_types[i] == 'Gamma':
        poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
            ot.LaguerreFactory())
    else:
        pdf = jpdf_ot.getDistributionCollection()[i]
        algo = ot.AdaptiveStieltjesAlgorithm(pdf)
        poly_collection[i] = ot.StandardDistributionPolynomialFactory(algo)

# create multivariate basis
mv_basis = ot.OrthogonalProductPolynomialFactory(poly_collection,
                                                 ot.EnumerateFunction(N))
# get enumerate function (multi-index handling)
enum_func = mv_basis.getEnumerateFunction()

max_fcalls = np.linspace(100, 1000, 10).tolist()
meanz = []
varz = []
cv_errz_rms = []
cv_errz_max = []
fcallz = []
# cross validation sample
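The snippet ends just as the cross-validation sample is created. A hedged sketch of how the cv_errz_rms/cv_errz_max lists might be filled, assuming callables model and surrogate exist (neither is shown in the snippet):

import numpy as np

n_cv = 1000
cv_sample = np.array(jpdf_ot.getSample(n_cv))
ref = np.array([model(x) for x in cv_sample])
approx = np.array([surrogate(x) for x in cv_sample])
cv_err = np.abs(ref - approx)
cv_errz_rms.append(np.sqrt(np.mean(cv_err ** 2)))
cv_errz_max.append(np.max(cv_err))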