#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

try:
    ot.ResourceMap.SetAsScalar(
        "LinearCombinationEvaluation-SmallCoefficient", 1.0e-10)
    domain = ot.Interval(-1.0, 1.0)
    basis = ot.OrthogonalProductPolynomialFactory([ot.LegendreFactory()])
    basisSize = 5
    experiment = ot.LHSExperiment(basis.getMeasure(), 100)
    mustScale = False
    threshold = 0.0001
    model = ot.AbsoluteExponential([1.0])
    algo = ot.KarhunenLoeveQuadratureAlgorithm(
        domain, model, experiment, basis, basisSize, mustScale, threshold)
    algo.run()
    result = algo.getResult()
    lambd = result.getEigenValues()
    KLModes = result.getModesAsProcessSample()
    print("KL modes=", KLModes)
    print("KL eigenvalues=", lambd)
    process = ot.GaussianProcess(model, KLModes.getMesh())
    sample = process.getSample(10)
    coefficients = result.project(sample)
    print("KL coefficients=", coefficients)
    KLFunctions = result.getModes()
    print("KL functions=", KLFunctions)
    print("KL lift=", result.lift(coefficients[0]))
    print("KL lift as field=", result.liftAsField(coefficients[0]))
Example #2
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

# Polynomial factories
factoryCollection = [
    ot.LaguerreFactory(2.5),
    ot.LegendreFactory(),
    ot.HermiteFactory()
]
dim = len(factoryCollection)
basisFactory = ot.OrthogonalProductPolynomialFactory(factoryCollection)
basis = ot.OrthogonalBasis(basisFactory)
print('basis=', basis)
x = [0.5] * dim
for i in range(10):
    f = basis.build(i)
    print('i=', i, 'f(X)=', f(x))

# Using multi-indices
enum = basis.getEnumerateFunction()
for i in range(10):
    indices = enum(i)
    f = basis.build(indices)
    print('indices=', indices, 'f(X)=', f(x))

# Other factories
factoryCollection = [
    ot.OrthogonalUniVariatePolynomialFunctionFactory(ot.LaguerreFactory(2.5)),
    ot.HaarWaveletFactory(),
    ot.FourierSeriesFactory()
]
dim = len(factoryCollection)
basisFactory = ot.OrthogonalProductFunctionFactory(factoryCollection)
basis = ot.OrthogonalBasis(basisFactory)
print('basis=', basis)
Example #3
# %%
sampleSize_train = 20
X_train = myDistribution.getSample(sampleSize_train)
Y_train = model(X_train)

# %%
# Create the Legendre basis
# -------------------------
#
# We first create a Legendre basis of univariate polynomials. In order to convert them into multivariate polynomials, we use a linear enumerate function.
#
# The `LegendreFactory` class creates Legendre polynomials. 

# %%
univariateFactory = ot.LegendreFactory()

# %%
# This factory corresponds to the `Uniform` distribution in the [-1,1] interval. 

# %%
univariateFactory.getMeasure()

# %%
# This interval does not correspond to the interval on which the input marginals are defined (we will come back to this topic later), but it nevertheless yields a consistent trend basis for the kriging.

# %%
polyColl = [univariateFactory]*dimension

# %%
enumerateFunction = ot.LinearEnumerateFunction(dimension)
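
# %%
# The tensorization announced above then combines the univariate factories
# with the enumerate function into a multivariate polynomial basis
# (a minimal sketch using only the objects defined in this fragment).

# %%
multivariateBasis = ot.OrthogonalProductPolynomialFactory(polyColl, enumerateFunction)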
Example #4
def dali_pce(func,
             N,
             jpdf_cp,
             jpdf_ot,
             tol=1e-12,
             max_fcalls=1000,
             verbose=True,
             interp_dict=None):  # None rather than a mutable default
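    """Build an adaptive sparse polynomial chaos expansion (PCE).

    A summary based on the code below: starting from the zero multi-index
    (or from the warm-start data in `interp_dict`), the admissible
    neighbors of the most recently activated multi-index are explored,
    `func` is evaluated at the corresponding 1-D Leja knots, a
    least-squares system over the orthogonal product basis is solved via
    QR, and the admissible index with the largest coefficient is
    activated. Iterations stop once the summed error indicator falls
    below `tol` or `max_fcalls` evaluations are reached. Relies on the
    module-level helpers `seq_lj_1d`, `admissible_neighbors`,
    `transform_multi_index_set` and `get_design_matrix`, plus
    `numpy as np` and `scipy.linalg as scl`.

    Parameters
    ----------
    func : callable taking a list of N knot coordinates.
    N : int, number of input random variables.
    jpdf_cp, jpdf_ot : joint input distribution, in an indexable
        per-dimension form (`jpdf_cp`) and as an OpenTURNS distribution
        (`jpdf_ot`).
    tol, max_fcalls, verbose : stopping tolerance, evaluation budget,
        verbosity flag.
    interp_dict : dict, optional warm-start data from a previous call.

    Returns
    -------
    dict holding the activated/admissible index sets, coefficients,
    function evaluations, the multivariate basis and its enumerate
    function.
    """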

    if not interp_dict:  # if dictionary is empty --> cold-start
        idx_act = []  # M_activated x N
        idx_adm = []  # M_admissible x N
        fevals_act = []  # M_activated x 1
        fevals_adm = []  # M_admissible x 1
        coeff_act = []  # M_activated x 1
        coeff_adm = []  #  M_admissible x 1

        # start with 0 multi-index
        knot0 = []
        for n in range(N):
            # get knots per dimension based on maximum index
            kk, ww = seq_lj_1d(order=0, dist=jpdf_cp[n])
            knot0.append(kk[0])
        feval = func(knot0)

        # update activated sets
        idx_act.append([0] * N)
        coeff_act.append(feval)
        fevals_act.append(feval)

        # local error indicators
        local_error_indicators = np.abs(coeff_act)

        # get the OT distribution type of each random variable
        dist_types = []
        for i in range(N):
            dist_type = jpdf_ot.getMarginal(i).getName()
            dist_types.append(dist_type)

        # create orthogonal univariate bases
        poly_collection = ot.PolynomialFamilyCollection(N)
        for i in range(N):
            if dist_types[i] == 'Uniform':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.LegendreFactory())
            elif dist_types[i] == 'Normal':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.HermiteFactory())
            elif dist_types[i] == 'Beta':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.JacobiFactory())
            elif dist_types[i] == 'Gamma':
                poly_collection[i] = ot.OrthogonalUniVariatePolynomialFamily(
                    ot.LaguerreFactory())
            else:
                pdf = jpdf_ot.getDistributionCollection()[i]
                algo = ot.AdaptiveStieltjesAlgorithm(pdf)
                poly_collection[i] = ot.StandardDistributionPolynomialFactory(
                    algo)

        # create multivariate basis
        mv_basis = ot.OrthogonalProductPolynomialFactory(
            poly_collection, ot.EnumerateFunction(N))
        # get enumerate function (multi-index handling)
        enum_func = mv_basis.getEnumerateFunction()

    else:
        idx_act = interp_dict['idx_act']
        idx_adm = interp_dict['idx_adm']
        coeff_act = interp_dict['coeff_act']
        coeff_adm = interp_dict['coeff_adm']
        fevals_act = interp_dict['fevals_act']
        fevals_adm = interp_dict['fevals_adm']
        mv_basis = interp_dict['mv_basis']
        enum_func = interp_dict['enum_func']
        # local error indicators
        local_error_indicators = np.abs(coeff_adm)

    # compute global error indicator
    global_error_indicator = local_error_indicators.sum()  # max or sum

    # fcalls / M approx. terms up to now
    fcalls = len(idx_act) + len(idx_adm)  # fcalls = M --> approx. terms

    # maximum index per dimension
    max_idx_per_dim = np.max(idx_act + idx_adm, axis=0)

    # univariate knots and polynomials per dimension
    knots_per_dim = {}
    for n in range(N):
        kk, ww = seq_lj_1d(order=max_idx_per_dim[n], dist=jpdf_cp[n])
        knots_per_dim[n] = kk

    # start iterations
    while global_error_indicator > tol and fcalls < max_fcalls:
        if verbose:
            print(fcalls)
            print(global_error_indicator)

        # the index added last to the activated set is the one to be refined
        last_act_idx = idx_act[-1][:]
        # compute the knot corresponding to the lastly added index
        last_knot = [
            knots_per_dim[n][i] for n, i in zip(range(N), last_act_idx)
        ]
        # get admissible neighbors of the lastly added index
        adm_neighbors = admissible_neighbors(last_act_idx, idx_act)

        for an in adm_neighbors:
            # update admissible index set
            idx_adm.append(an)
            # find which parameter/direction n (n=1,2,...,N) gets refined
            n_ref = np.argmin(
                [idx1 == idx2 for idx1, idx2 in zip(an, last_act_idx)])
            # sequence of 1d Leja nodes/weights for the given refinement
            knots_n, weights_n = seq_lj_1d(an[n_ref], jpdf_cp[int(n_ref)])

            # update max_idx_per_dim, knots_per_dim, if necessary
            if an[n_ref] > max_idx_per_dim[n_ref]:
                max_idx_per_dim[n_ref] = an[n_ref]
                knots_per_dim[n_ref] = knots_n

            # find new_knot and compute function on new_knot
            new_knot = last_knot[:]
            new_knot[n_ref] = knots_n[-1]
            feval = func(new_knot)
            fevals_adm.append(feval)
            fcalls += 1  # update function calls

        # create PCE basis
        idx_system = idx_act + idx_adm
        idx_system_single = transform_multi_index_set(idx_system, enum_func)
        system_basis = mv_basis.getSubBasis(idx_system_single)
        # get corresponding evaluations
        fevals_system = fevals_act + fevals_adm
        # multi-dimensional knots
        M = len(idx_system)  # equations terms
        knots_md = [[knots_per_dim[n][idx_system[m][n]] for m in range(M)]
                    for n in range(N)]
        knots_md = np.array(knots_md).T
        # design matrix
        D = get_design_matrix(system_basis, knots_md)
        # solve the system of equations (least squares via economic-size QR)
        Q, R = scl.qr(D, mode='economic')
        c = Q.T.dot(fevals_system)
        coeff_system = scl.solve_triangular(R, c)

        # find the multi-index with the largest contribution, add it to idx_act
        # and delete it from idx_adm
        coeff_act = coeff_system[:len(idx_act)].tolist()
        coeff_adm = coeff_system[-len(idx_adm):].tolist()
        help_idx = np.argmax(np.abs(coeff_adm))
        idx_add = idx_adm.pop(help_idx)
        pce_coeff_add = coeff_adm.pop(help_idx)
        fevals_add = fevals_adm.pop(help_idx)
        idx_act.append(idx_add)
        coeff_act.append(pce_coeff_add)
        fevals_act.append(fevals_add)
        # re-compute coefficients of admissible multi-indices

        # local error indicators
        local_error_indicators = np.abs(coeff_adm)
        # compute global error indicator
        global_error_indicator = local_error_indicators.sum()  # max or sum

    # store expansion data in dictionary
    interp_dict = {}
    interp_dict['idx_act'] = idx_act
    interp_dict['idx_adm'] = idx_adm
    interp_dict['coeff_act'] = coeff_act
    interp_dict['coeff_adm'] = coeff_adm
    interp_dict['fevals_act'] = fevals_act
    interp_dict['fevals_adm'] = fevals_adm
    interp_dict['enum_func'] = enum_func
    interp_dict['mv_basis'] = mv_basis
    return interp_dict
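
# A hypothetical usage sketch (`model`, `joint_cp` and `joint_ot` are
# placeholder names, not defined in this example):
# interp = dali_pce(model, N=2, jpdf_cp=joint_cp, jpdf_ot=joint_ot,
#                   tol=1e-10, max_fcalls=500)
# interp = dali_pce(model, 2, joint_cp, joint_ot, max_fcalls=1000,
#                   interp_dict=interp)  # warm restart to refine further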
Example #5
# Fix sampling size
samplingSize = 100

# Get input & output sample
lhs = ot.LHSExperiment(distribution, samplingSize)
inputSample = lhs.generate()
outputSample = model(inputSample)

# Validation of results on independent samples
validationSize = 10
inputValidation = distribution.getSample(validationSize)
outputValidation = model(inputValidation)

# 1) SPC algorithm
# Create the orthogonal basis
polynomialCollection = [ot.LegendreFactory()] * dimension

enumerateFunction = ot.LinearEnumerateFunction(dimension)
productBasis = ot.OrthogonalProductPolynomialFactory(
    polynomialCollection, enumerateFunction)

# Create the adaptive strategy
degree = 8
basisSize = enumerateFunction.getStrataCumulatedCardinal(degree)
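# For the linear enumeration rule, this cumulated cardinal equals
# binomial(dimension + degree, degree).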
adaptiveStrategy = ot.FixedStrategy(productBasis, basisSize)

# Select the fitting algorithm
fittingAlgorithm = ot.KFold()
leastSquaresFactory = ot.LeastSquaresMetaModelSelectionFactory(
    ot.LARS(), fittingAlgorithm)
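
# A sketch of how these pieces are typically combined, reusing the objects
# defined above in this example:
projectionStrategy = ot.LeastSquaresStrategy(
    inputSample, outputSample, leastSquaresFactory)
algo = ot.FunctionalChaosAlgorithm(
    inputSample, outputSample, distribution, adaptiveStrategy,
    projectionStrategy)
algo.run()
metamodel = algo.getResult().getMetaModel()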
Example #6
# Retrieve optimal design
input_database = algo.generate()

result = algo.getResult()

print('initial design computed')

# Response of the model
print('sampling size = ', N)
output_database = ishigami_model(input_database)

# Learning input/output
# Usual chaos meta model
enumerate_function = ot.HyperbolicAnisotropicEnumerateFunction(dimension)
orthogonal_basis = ot.OrthogonalProductPolynomialFactory(
    dimension * [ot.LegendreFactory()], enumerate_function)
basis_size = 100
# Initial chaos algorithm
adaptive_strategy = ot.FixedStrategy(orthogonal_basis, basis_size)
# ProjectionStrategy ==> Sparse
fitting_algorithm = ot.KFold()
approximation_algorithm = ot.LeastSquaresMetaModelSelectionFactory(
    ot.LARS(), fitting_algorithm)
projection_strategy = ot.LeastSquaresStrategy(input_database, output_database,
                                              approximation_algorithm)
print('Surrogate model...')
distribution_ishigami = ot.ComposedDistribution(dimension *
                                                [ot.Uniform(-pi, pi)])
algo_pc = ot.FunctionalChaosAlgorithm(input_database, output_database,
                                      distribution_ishigami, adaptive_strategy,
                                      projection_strategy)
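
# A sketch of the usual next step: run the chaos algorithm and retrieve
# its metamodel.
algo_pc.run()
metamodel_pc = algo_pc.getResult().getMetaModel()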
Example #7
import openturns as ot
import openturns.viewer as viewer
import numpy as np

dimension = 1


def piecewise(X):
    # if x < 0.0:
    #     f = x*(x+1.5)
    # else:
    #     f = 2.0-x**2
    xarray = np.asarray(X)
    return np.piecewise(xarray, [xarray < 0, xarray >= 0],
                        [lambda x: x * (x + 1.5), lambda x: 2.0 - x * x])


f = ot.PythonFunction(1, 1, func_sample=piecewise)

# %%
# Build a metamodel over each segment
degree = 5
samplingSize = 100
enumerateFunction = ot.LinearEnumerateFunction(dimension)
productBasis = ot.OrthogonalProductPolynomialFactory(
    [ot.LegendreFactory()] * dimension, enumerateFunction)
adaptiveStrategy = ot.FixedStrategy(
    productBasis, enumerateFunction.getStrataCumulatedCardinal(degree))
projectionStrategy = ot.LeastSquaresStrategy(
    ot.MonteCarloExperiment(samplingSize))

# %%
# Segment 1: (-1.0; 0.0)
d1 = ot.Uniform(-1.0, 0.0)
fc1 = ot.FunctionalChaosAlgorithm(f, d1, adaptiveStrategy, projectionStrategy)
fc1.run()
mm1 = fc1.getResult().getMetaModel()
graph = mm1.draw(-1.0, -1e-6)
view = viewer.View(graph)

# %%
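# Segment 2: (0.0, 1.0) would be handled symmetrically; a sketch mirroring
# segment 1:
d2 = ot.Uniform(0.0, 1.0)
fc2 = ot.FunctionalChaosAlgorithm(f, d2, adaptiveStrategy, projectionStrategy)
fc2.run()
mm2 = fc2.getResult().getMetaModel()
graph = mm2.draw(1e-6, 1.0)
view = viewer.View(graph)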
Example #8
#! /usr/bin/env python

import openturns as ot

# Polynomial factories
factoryCollection = [ot.LaguerreFactory(
    2.5), ot.LegendreFactory(), ot.HermiteFactory()]
dim = len(factoryCollection)
basisFactory = ot.OrthogonalProductPolynomialFactory(factoryCollection)
basis = ot.OrthogonalBasis(basisFactory)
print('basis=', basis)
x = [0.5] * dim
for i in range(10):
    f = basis.build(i)
    print('i=', i, 'f(X)=', f(x))

# Using multi-indices
enum = basis.getEnumerateFunction()
for i in range(10):
    indices = enum(i)
    f = basis.build(indices)
    print('indices=', indices, 'f(X)=', f(x))

# Other factories
factoryCollection = [ot.OrthogonalUniVariatePolynomialFunctionFactory(
    ot.LaguerreFactory(2.5)),
    ot.HaarWaveletFactory(), ot.FourierSeriesFactory()]
dim = len(factoryCollection)
basisFactory = ot.OrthogonalProductFunctionFactory(factoryCollection)
basis = ot.OrthogonalBasis(basisFactory)
print('basis=', basis)
Example #9
#! /usr/bin/env python

from __future__ import print_function
import openturns as ot

polynomialFactory = ot.LegendreFactory()
factory = ot.OrthogonalUniVariatePolynomialFunctionFactory(polynomialFactory)
print(factory)
x = 0.4
for i in range(10):
    function = factory.build(i)
    print('order=', i, function, 'X=', ot.Point([x]), 'f(X)=',
          ot.Point([function(x)]), 'df(X)=', ot.Point([function.gradient(x)]),
          'd2f(X)=', ot.Point([function.hessian(x)]))
Example #10
polyColl[1] = ot.CharlierFactory()

# %%
# We could also use the automatic selection of the polynomial which corresponds to the distribution: this is done with the `StandardDistributionPolynomialFactory` class.

# %%
for i in range(inputDimension):
    marginal = distribution.getMarginal(i)
    polyColl[i] = ot.StandardDistributionPolynomialFactory(marginal)

# %%
# In this case, we select the polynomial factories explicitly.

# %%
polyColl[0] = ot.HermiteFactory()
polyColl[1] = ot.LegendreFactory()
polyColl[2] = ot.LaguerreFactory(2.75)
# Parameter for the Jacobi factory: 'Probability' parametrization, encoded as 1
polyColl[3] = ot.JacobiFactory(2.5, 3.5, 1)

# %%
# Create the enumeration function.

# %%
# The first possibility is to use the `LinearEnumerateFunction`.

# %%
enumerateFunction = ot.LinearEnumerateFunction(inputDimension)

# %%
# Another possibility is to use the `HyperbolicAnisotropicEnumerateFunction`, which gives less weight to interactions.
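
# %%
# A quick way to compare the two rules is to print the first few
# multi-indices each one generates (a minimal sketch; the quasi-norm
# parameter 0.6 is an arbitrary choice).

# %%
hyperbolicEnumerate = ot.HyperbolicAnisotropicEnumerateFunction(inputDimension, 0.6)
for i in range(8):
    print(i, enumerateFunction(i), hyperbolicEnumerate(i))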
Example #11
    def _compute_coefficients_legendre(self,
                                       sample_paths,
                                       legendre_quadrature_order=None):

        dimension = self._lower_bound.size
        truncation_order = self._truncation_order
        if legendre_quadrature_order is None:
            legendre_quadrature_order = self._legendre_quadrature_order
        elif type(legendre_quadrature_order) is not int \
                or legendre_quadrature_order <= 0:
            raise ValueError('legendre_quadrature_order must be a positive ' +
                             'integer.')
        n_sample_paths = len(sample_paths)

        # Gauss-Legendre quadrature nodes and weights
        polyColl = ot.PolynomialFamilyCollection([ot.LegendreFactory()] *
                                                 dimension)
        polynoms = ot.OrthogonalProductPolynomialFactory(polyColl)
        U, W = polynoms.getNodesAndWeights(
            ot.Indices([legendre_quadrature_order] * dimension))
        W = np.ravel(W)
        U = np.array(U)
        scale = (self._upper_bound - self._lower_bound) / 2.
        shift = (self._upper_bound + self._lower_bound) / 2.
        X = scale * U + shift

        # Compute coefficients
        try:
            available_memory = int(.9 * get_available_memory())
        except Exception:
            if self.verbose:
                print('WRN: Available memory estimation failed! '
                      'Assuming 1 GB is available (first guess).')
            available_memory = 1024**3
        max_size = int(available_memory / 8 / truncation_order /
                       n_sample_paths)
        batch_size = min(W.size, max_size)
        if self.verbose and batch_size < W.size:
            print('RAM: %d Mb available' % (available_memory / 1024**2))
            print('RAM: %d allocable terms / %d total terms' %
                  (max_size, W.size))
            print('RAM: %d loops required' % np.ceil(float(W.size) / max_size))
        while True:
            coefficients = np.zeros((n_sample_paths, truncation_order))
            try:
                n_done = 0
                while n_done < W.size:
                    sample_paths_values = np.vstack([
                        np.ravel(sample_paths[i](X[n_done:(n_done +
                                                           batch_size)]))
                        for i in range(n_sample_paths)
                    ])
                    mean_values = np.ravel(
                        self._mean(X[n_done:(n_done +
                                             batch_size)]))[np.newaxis, :]
                    centered_sample_paths_values = \
                        sample_paths_values - mean_values
                    del sample_paths_values, mean_values
                    eigenelements_values = np.vstack([
                        self._eigenfunctions[k](
                            X[n_done:(n_done + batch_size)]) /
                        np.sqrt(self._eigenvalues[k])
                        for k in range(truncation_order)
                    ])
                    coefficients += np.sum(
                        W[np.newaxis, np.newaxis, n_done:(n_done + batch_size)]
                        * centered_sample_paths_values[:, np.newaxis, :] *
                        eigenelements_values[np.newaxis, :, :],
                        axis=-1)
                    del centered_sample_paths_values, eigenelements_values
                    n_done += batch_size
                break
            except MemoryError:
                # integer halving keeps slicing valid under Python 3
                batch_size = max(1, batch_size // 2)
        coefficients *= np.prod(self._upper_bound - self._lower_bound)

        return coefficients
Example #12
    def _legendre_galerkin_scheme(self,
                                  legendre_galerkin_order=10,
                                  legendre_quadrature_order=None):

        # Input checks
        if legendre_galerkin_order <= 0:
            raise ValueError('legendre_galerkin_order must be a positive ' +
                             'integer!')

        if legendre_quadrature_order is not None:
            if legendre_quadrature_order <= 0:
                raise ValueError('legendre_quadrature_order must be a ' +
                                 'positive integer!')

        # Settings
        dimension = self._lower_bound.size
        truncation_order = self._truncation_order
        galerkin_size = ot.EnumerateFunction(
            dimension).getStrataCumulatedCardinal(legendre_galerkin_order)
        if legendre_quadrature_order is None:
            legendre_quadrature_order = 2 * legendre_galerkin_order + 1

        # Check if the current settings are compatible
        if truncation_order > galerkin_size:
            raise ValueError(
                'The truncation order must be less than or ' +
                'equal to the size of the functional basis in the chosen ' +
                'Legendre Galerkin scheme. Current size of the galerkin basis '
                + 'only allows to get %d terms in the KL expansion.' %
                galerkin_size)

        # Construction of the Galerkin basis: tensorized Legendre polynomials
        tensorized_legendre_polynomial_factory = \
            ot.PolynomialFamilyCollection([ot.LegendreFactory()] * dimension)
        tensorized_legendre_polynomial_factory = \
            ot.OrthogonalProductPolynomialFactory(
                tensorized_legendre_polynomial_factory)
        tensorized_legendre_polynomials = \
            [tensorized_legendre_polynomial_factory.build(i)
             for i in range(galerkin_size)]

        # Compute matrix C coefficients using Gauss-Legendre quadrature
        polyColl = ot.PolynomialFamilyCollection([ot.LegendreFactory()] *
                                                 dimension * 2)
        polynoms = ot.OrthogonalProductPolynomialFactory(polyColl)
        U, W = polynoms.getNodesAndWeights(
            ot.Indices([legendre_quadrature_order] * dimension * 2))
        W = np.ravel(W)
        scale = (self._upper_bound - self._lower_bound) / 2.
        shift = (self._upper_bound + self._lower_bound) / 2.
        U = np.array(U)
        # tile (not repeat): columns [0:dim] and [dim:2*dim] each hold a full point
        X = np.tile(scale, 2) * U + np.tile(shift, 2)

        if self.verbose:
            print('Computing matrix C...')

        try:
            available_memory = int(.9 * get_available_memory())
        except Exception:
            if self.verbose:
                print('WRN: Available memory estimation failed! '
                      'Assuming 1 GB is available (first guess).')
            available_memory = 1024**3
        max_size = int(available_memory / 8 / galerkin_size**2)
        batch_size = min(W.size, max_size)
        if self.verbose and batch_size < W.size:
            print('RAM: %d Mb available' % (available_memory / 1024**2))
            print('RAM: %d allocable terms / %d total terms' %
                  (max_size, W.size))
            print('RAM: %d loops required' % np.ceil(float(W.size) / max_size))
        while True:
            C = np.zeros((galerkin_size, galerkin_size))
            try:
                n_done = 0
                while n_done < W.size:
                    covariance_at_X = self._covariance(X[n_done:(n_done +
                                                                 batch_size)])
                    H1 = np.vstack([
                        np.ravel(tensorized_legendre_polynomials[i](
                            U[n_done:(n_done + batch_size), :dimension]))
                        for i in range(galerkin_size)
                    ])
                    H2 = np.vstack([
                        np.ravel(tensorized_legendre_polynomials[i](
                            U[n_done:(n_done + batch_size), dimension:]))
                        for i in range(galerkin_size)
                    ])
                    C += np.sum(W[np.newaxis, np.newaxis,
                                  n_done:(n_done + batch_size)] *
                                covariance_at_X[np.newaxis, np.newaxis, :] *
                                H1[np.newaxis, :, :] * H2[:, np.newaxis, :],
                                axis=-1)
                    del covariance_at_X, H1, H2
                    n_done += batch_size
                break
            except MemoryError:
                # integer halving keeps slicing valid under Python 3
                batch_size = max(1, batch_size // 2)
        C *= np.prod(self._upper_bound - self._lower_bound)**2.

        # Matrix B is orthonormal up to some constant
        B = np.diag(
            np.repeat(np.prod(self._upper_bound - self._lower_bound),
                      galerkin_size))

        # Solve the generalized eigenvalue problem C D = L B D in L, D
        if self.verbose:
            print('Solving generalized eigenvalue problem...')
        eigenvalues, eigenvectors = linalg.eigh(C, b=B, lower=True)
        eigenvalues, eigenvectors = eigenvalues.real, eigenvectors.real

        # Sort the eigensolutions in the descending order of eigenvalues
        order = eigenvalues.argsort()[::-1]
        eigenvalues = eigenvalues[order]
        eigenvectors = eigenvectors[:, order]

        # Truncate the expansion
        eigenvalues = eigenvalues[:truncation_order]
        eigenvectors = eigenvectors[:, :truncation_order]

        # Eliminate insignificant negative eigenvalues
        if eigenvalues.min() <= 0.:
            if eigenvalues.min() < -.01 * eigenvalues.max():
                raise Exception(
                    'The smallest significant eigenvalue seems ' +
                    'to be negative... Check the positive definiteness of the '
                    + 'covariance function.')
            else:
                truncation_order = np.nonzero(eigenvalues <= 0)[0][0]
                eigenvalues = eigenvalues[:truncation_order]
                eigenvectors = eigenvectors[:, :truncation_order]
                self._truncation_order = truncation_order
                print('WRN: truncation_order was too large.')
                print('It has been reset to: %d' % truncation_order)

        # Define eigenfunctions
        class LegendrePolynomialsBasedEigenFunction():
            def __init__(self, vector):
                self._vector = vector

            def __call__(self, x):
                x = np.asanyarray(x)
                if x.ndim <= 1:
                    x = np.atleast_2d(x).T
                u = (x - shift) / scale
                return np.sum([
                    np.ravel(tensorized_legendre_polynomials[i](u)) *
                    self._vector[i] for i in range(truncation_order)
                ],
                              axis=0)

        # Set attributes
        self._eigenvalues = eigenvalues
        self._eigenfunctions = [
            LegendrePolynomialsBasedEigenFunction(vector)
            for vector in eigenvectors.T
        ]
        self._legendre_galerkin_order = legendre_galerkin_order
        self._legendre_quadrature_order = legendre_quadrature_order
Example #13
import openturns as ot
import numpy as np
from matplotlib import pyplot as plt
n_functions = 8
function_factory = ot.LegendreFactory()
if function_factory.getClassName() == 'KrawtchoukFactory':
    # the Krawtchouk family needs explicit parameters
    function_factory = ot.KrawtchoukFactory(n_functions, .5)
functions = [function_factory.build(i) for i in range(n_functions)]
measure = function_factory.getMeasure()
if hasattr(measure, 'getA') and hasattr(measure, 'getB'):
    x_min = measure.getA()
    x_max = measure.getB()
else:
    x_min = measure.computeQuantile(1e-3)[0]
    x_max = measure.computeQuantile(1. - 1e-3)[0]
n_points = 200
meshed_support = np.linspace(x_min, x_max, n_points)
fig = plt.figure()
ax = fig.add_subplot(111)
for i in range(n_functions):
    plt.plot(meshed_support, [functions[i](x) for x in meshed_support],
             lw=1.5,
             label=r'$\phi_{' + str(i) + '}(x)$')
plt.xlabel('$x$')
plt.ylabel(r'$\phi_i(x)$')
plt.xlim(x_min, x_max)
plt.grid()
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height * 0.9])
plt.legend(loc='upper center', bbox_to_anchor=(.5, 1.25), ncol=4)
plt.show()