Example #1
    def test_adaptive_approximate_increment_degree(self):
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree = 3
        poly.set_indices(pya.compute_hyperbolic_indices(num_vars, degree))
        poly.set_coefficients(
            np.random.normal(0, 1, (poly.indices.shape[1], 1)))
        fun = poly

        max_degree = degree + 2
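        # request an expansion two degrees higher than the data-generating
        # polynomial; its leading coefficients should recover the true ones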
        result = adaptive_approximate_polynomial_chaos_increment_degree(
            fun,
            variable,
            max_degree,
            max_nsamples=31,
            cond_tol=1e4,
            sample_growth_factor=2,
            verbose=0,
            oversampling_ratio=None,
            solver_type='lstsq',
            callback=None)
        print('Ntrain samples', result.train_samples.shape[1])
        assert np.allclose(
            result.approx.coefficients[:poly.coefficients.shape[0]],
            poly.coefficients)
Example #2
    def test_approximate_fixed_pce(self):
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree, hcross_strength = 7, 0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(num_vars, degree, hcross_strength))
        num_samples = poly.num_terms() * 2
        degrees = poly.indices.sum(axis=0)
        coef = np.random.normal(
            0, 1, (poly.indices.shape[1], 2)) / (degrees[:, np.newaxis] + 1)**2
        # set some coefficients to zero to make sure that different qoi
        # are treated correctly.
        II = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[II, 0] = 0
        II = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[II, 1] = 0
        poly.set_coefficients(coef)
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)

        indices = compute_hyperbolic_indices(num_vars, 1, 1)
        nfolds = 10
        method = 'polynomial_chaos'
        options = {
            'basis_type': 'fixed',
            'variable': variable,
            'options': {
                'linear_solver_options': {},
                'indices': indices,
                'solver_type': 'lstsq'
            }
        }
        approx_list, residues_list, cv_score = cross_validate_approximation(
            train_samples,
            train_vals,
            options,
            nfolds,
            method,
            random_folds=False)

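        # solve the same cross-validated least-squares problem directly and
        # check that the scores agree for each qoi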
        solver = LinearLeastSquaresCV(cv=nfolds, random_folds=False)
        poly.set_indices(indices)
        basis_matrix = poly.basis_matrix(train_samples)
        solver.fit(basis_matrix, train_vals[:, 0:1])
        assert np.allclose(solver.cv_score_, cv_score[0])

        solver.fit(basis_matrix, train_vals[:, 1:2])
        assert np.allclose(solver.cv_score_, cv_score[1])
Example #3
    def test_pce_sensitivities_of_sobol_g_function(self):
        nsamples = 2000
        nvars, degree = 3, 8
        a = np.array([1, 2, 5])[:nvars]
        univariate_variables = [uniform(0, 1)] * nvars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)

        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)
        indices = pya.tensor_product_indices([degree] * nvars)
        poly.set_indices(indices)
        # print('No. PCE Terms', indices.shape[1])

        # sample from the arcsine (Chebyshev) density on [0, 1] rather than
        # the uniform density; these samples pair with the Christoffel-type
        # weights computed below
        samples = (np.cos(np.random.uniform(0, np.pi,
                                            (nvars, nsamples))) + 1) / 2
        values = sobol_g_function(a, samples)

        basis_matrix = poly.basis_matrix(samples)
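        # weight each sample by the inverse of the unnormalized Christoffel
        # function, 1/sum_n phi_n(z)^2, a standard stabilization for
        # least squares with samples drawn from the arcsine density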
        weights = 1 / np.sum(basis_matrix**2, axis=1)[:, np.newaxis]
        coef = np.linalg.lstsq(basis_matrix * weights,
                               values * weights,
                               rcond=None)[0]
        poly.set_coefficients(coef)

        nvalidation_samples = 1000
        validation_samples = pya.generate_independent_random_samples(
            var_trans.variable, nvalidation_samples)
        validation_values = sobol_g_function(a, validation_samples)

        poly_validation_vals = poly(validation_samples)
        rel_error = np.linalg.norm(poly_validation_vals - validation_values
                                   ) / np.linalg.norm(validation_values)
        print('Rel. Error', rel_error)

        pce_main_effects, pce_total_effects =\
            pya.get_main_and_total_effect_indices_from_pce(
                poly.get_coefficients(), poly.get_indices())
        interaction_terms, pce_sobol_indices = get_sobol_indices(
            poly.get_coefficients(), poly.get_indices(), max_order=3)

        mean, variance, main_effects, total_effects, sobol_indices = \
            get_sobol_g_function_statistics(a, interaction_terms)
        assert np.allclose(poly.mean(), mean, atol=1e-2)
        # print((poly.variance(),variance))
        assert np.allclose(poly.variance(), variance, atol=1e-2)
        # print(pce_main_effects,main_effects)
        assert np.allclose(pce_main_effects, main_effects, atol=1e-2)
        # print(pce_total_effects,total_effects)
        assert np.allclose(pce_total_effects, total_effects, atol=1e-2)
        assert np.allclose(pce_sobol_indices, sobol_indices, atol=1e-2)
Example #4
    def help_cross_validate_pce_degree(self, solver_type, solver_options):
        print(solver_type, solver_options)
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree = 3
        poly.set_indices(pya.compute_hyperbolic_indices(num_vars, degree, 1.0))
        # factor of 2 does not pass test but 2.2 does
        num_samples = int(poly.num_terms() * 2.2)
        coef = np.random.normal(0, 1, (poly.indices.shape[1], 2))
        # zero the terms above total degree 2 for the first qoi so that
        # degree 2 is the best degree for that qoi
        coef[pya.nchoosek(num_vars + 2, 2):, 0] = 0
        poly.set_coefficients(coef)

        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        true_poly = poly

        poly = approximate(
            train_samples, train_vals, "polynomial_chaos", {
                "basis_type": "hyperbolic_cross",
                "variable": variable,
                "options": {
                    "verbose": 3,
                    "solver_type": solver_type,
                    "min_degree": 1,
                    "max_degree": degree + 1,
                    "linear_solver_options": solver_options
                }
            }).approx

        num_validation_samples = 10
        validation_samples = pya.generate_independent_random_samples(
            variable, num_validation_samples)
        assert np.allclose(poly(validation_samples),
                           true_poly(validation_samples))

        poly = copy.deepcopy(true_poly)
        approx_res = cross_validate_pce_degree(
            poly,
            train_samples,
            train_vals,
            1,
            degree + 1,
            solver_type=solver_type,
            linear_solver_options=solver_options)
        assert np.allclose(approx_res.degrees, [2, 3])
Example #5
    def test_pce_basis_expansion(self):
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree, hcross_strength = 7, 0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(num_vars, degree, hcross_strength))
        num_samples = poly.num_terms() * 2
        degrees = poly.indices.sum(axis=0)
        coef = np.random.normal(
            0, 1, (poly.indices.shape[1], 2)) / (degrees[:, np.newaxis] + 1)**2
        # set some coefficients to zero to make sure that different qoi
        # are treated correctly.
        II = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[II, 0] = 0
        II = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[II, 1] = 0
        poly.set_coefficients(coef)
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        true_poly = poly

        poly = approximate(
            train_samples, train_vals, "polynomial_chaos", {
                "basis_type": "expanding_basis",
                "variable": variable,
                "options": {
                    "max_num_expansion_steps_iter": 1,
                    "verbose": 3,
                    "max_num_terms": 1000,
                    "max_num_step_increases": 2,
                    "max_num_init_terms": 33
                }
            }).approx

        num_validation_samples = 100
        # validate on the training samples to check that the expanding-basis
        # solver exactly recovers the training data
        validation_samples = train_samples
        error = np.linalg.norm(
            poly(validation_samples) -
            true_poly(validation_samples)) / np.sqrt(num_validation_samples)
        assert np.allclose(poly(validation_samples),
                           true_poly(validation_samples),
                           atol=1e-8), error
Example #6
    def __init__(self, mesh_dof=100, num_terms=35):
        self.mesh = np.linspace(-1., 1., mesh_dof)
        self.num_terms = num_terms

        variable = [uniform(-1, 2)]
        var_trans = pya.AffineRandomVariableTransformation(variable)
        self.poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        self.poly.configure(poly_opts)
        self.poly.set_indices(
            pya.compute_hyperbolic_indices(1, self.num_terms - 1))
Example #7
    def test_pce_sensitivities_of_ishigami_function(self):
        nsamples = 1500
        nvars, degree = 3, 18
        univariate_variables = [uniform(-np.pi, 2 * np.pi)] * nvars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)

        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)
        indices = pya.compute_hyperbolic_indices(nvars, degree, 1.0)
        poly.set_indices(indices)
        # print('No. PCE Terms', indices.shape[1])

        samples = pya.generate_independent_random_samples(
            var_trans.variable, nsamples)
        values = ishigami_function(samples)

        basis_matrix = poly.basis_matrix(samples)
        coef = np.linalg.lstsq(basis_matrix, values, rcond=None)[0]
        poly.set_coefficients(coef)

        nvalidation_samples = 1000
        validation_samples = pya.generate_independent_random_samples(
            var_trans.variable, nvalidation_samples)
        validation_values = ishigami_function(validation_samples)
        poly_validation_vals = poly(validation_samples)
        abs_error = np.linalg.norm(poly_validation_vals - validation_values
                                   ) / np.sqrt(nvalidation_samples)
        # print('Abs. Error', abs_error)

        pce_main_effects, pce_total_effects =\
            pya.get_main_and_total_effect_indices_from_pce(
                poly.get_coefficients(), poly.get_indices())

        mean, variance, main_effects, total_effects, sobol_indices, \
            sobol_interaction_indices = get_ishigami_funciton_statistics()
        assert np.allclose(poly.mean(), mean)
        assert np.allclose(poly.variance(), variance)
        assert np.allclose(pce_main_effects, main_effects)
        assert np.allclose(pce_total_effects, total_effects)

        interaction_terms, pce_sobol_indices = get_sobol_indices(
            poly.get_coefficients(), poly.get_indices(), max_order=3)
        assert np.allclose(pce_sobol_indices, sobol_indices)
Example #8
    def test_pce_basis_expansion(self):
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree, hcross_strength = 7, 0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(num_vars, degree, hcross_strength))
        num_samples = poly.num_terms() * 2
        degrees = poly.indices.sum(axis=0)
        poly.set_coefficients((np.random.normal(0, 1, poly.indices.shape[1]) /
                               (degrees + 1)**2)[:, np.newaxis])
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        true_poly = poly

        poly = approximate(train_samples, train_vals, 'polynomial_chaos', {
            'basis_type': 'expanding_basis',
            'variable': variable
        }).approx

        num_validation_samples = 100
        # validate on the training samples to check exact recovery of the
        # training data
        validation_samples = train_samples
        error = np.linalg.norm(
            poly(validation_samples) -
            true_poly(validation_samples)) / np.sqrt(num_validation_samples)
        assert np.allclose(poly(validation_samples),
                           true_poly(validation_samples),
                           atol=1e-8), error
Example #9
    def test_cross_validate_pce_degree(self):
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree = 3
        poly.set_indices(pya.compute_hyperbolic_indices(num_vars, degree, 1.0))
        num_samples = poly.num_terms() * 2
        poly.set_coefficients(
            np.random.normal(0, 1, (poly.indices.shape[1], 1)))
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        true_poly = poly

        poly = approximate(train_samples, train_vals, 'polynomial_chaos', {
            'basis_type': 'hyperbolic_cross',
            'variable': variable
        }).approx

        num_validation_samples = 10
        validation_samples = pya.generate_independent_random_samples(
            variable, num_validation_samples)
        assert np.allclose(poly(validation_samples),
                           true_poly(validation_samples))

        poly = copy.deepcopy(true_poly)
        poly, best_degree = cross_validate_pce_degree(poly, train_samples,
                                                      train_vals, 1,
                                                      degree + 2)
        assert best_degree == degree
Example #10
    def test_cross_validate_approximation_after_regularization_selection(self):
        """
        This test is useful as it shows how to use cross_validate_approximation
        to produce a list of approximations on each cross validation fold
        once regularization parameters have been chosen.
        These can be used to show variance in predictions of values,
        sensitivity indices, etc.

        Ideally this could be avoided if sklearn stored the coefficients
        and alphas for each fold and then we can just find the coefficients
        that correspond to the first time the path drops below the best_alpha
        """
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree, hcross_strength = 7, 0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(num_vars, degree, hcross_strength))
        num_samples = poly.num_terms() * 2
        degrees = poly.indices.sum(axis=0)
        coef = np.random.normal(
            0, 1, (poly.indices.shape[1], 2)) / (degrees[:, np.newaxis] + 1)**2
        # set some coefficients to zero to make sure that different qoi
        # are treated correctly.
        II = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[II, 0] = 0
        II = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[II, 1] = 0
        poly.set_coefficients(coef)
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        # true_poly = poly

        result = approximate(train_samples, train_vals, "polynomial_chaos", {
            "basis_type": "expanding_basis",
            "variable": variable
        })

        # Even with the same folds, iterative methods such as Lars, LarsLasso
        # and OMP will not produce exactly the same cv_score from approximate
        # and cross_validate_approximation, because iterative methods
        # interpolate residuals to compute cross-validation scores
        nfolds = 10
        linear_solver_options = [{
            "alpha": result.reg_params[0]
        }, {
            "alpha": result.reg_params[1]
        }]
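        # restrict each qoi's basis to the indices with nonzero coefficients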
        indices = [
            result.approx.indices[:, np.where(np.absolute(c) > 0)[0]]
            for c in result.approx.coefficients.T
        ]
        options = {
            "basis_type": "fixed",
            "variable": variable,
            "options": {
                "linear_solver_options": linear_solver_options,
                "indices": indices
            }
        }
        approx_list, residues_list, cv_score = \
            cross_validate_approximation(
                train_samples, train_vals, options, nfolds, "polynomial_chaos",
                random_folds="sklearn")

        assert (np.all(cv_score < 6e-14) and np.all(result.scores < 4e-13))
Example #11
    def test_marginalize_polynomial_chaos_expansions(self):
        univariate_variables = [uniform(-1, 2), norm(0, 1), uniform(-1, 2)]
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        num_vars = len(univariate_variables)

        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree = 2
        indices = pya.compute_hyperbolic_indices(num_vars, degree, 1)
        poly.set_indices(indices)
        poly.set_coefficients(np.ones((indices.shape[1], 1)))

        pce_main_effects, pce_total_effects =\
            pya.get_main_and_total_effect_indices_from_pce(
                poly.get_coefficients(), poly.get_indices())
        print(poly.num_terms())

        for ii in range(num_vars):
            # Marginalize out 2 variables
            xx = np.linspace(-1, 1, 101)
            inactive_idx = np.hstack(
                (np.arange(ii), np.arange(ii + 1, num_vars)))
            marginalized_pce = pya.marginalize_polynomial_chaos_expansion(
                poly, inactive_idx, center=True)
            mvals = marginalized_pce(xx[None, :])
            variable_ii = variable.all_variables()[ii:ii + 1]
            var_trans_ii = pya.AffineRandomVariableTransformation(variable_ii)
            poly_ii = pya.PolynomialChaosExpansion()
            poly_opts_ii = \
                pya.define_poly_options_from_variable_transformation(
                    var_trans_ii)
            poly_ii.configure(poly_opts_ii)
            indices_ii = compute_hyperbolic_indices(1, degree, 1.)
            poly_ii.set_indices(indices_ii)
            poly_ii.set_coefficients(np.ones((indices_ii.shape[1], 1)))
            pvals = poly_ii(xx[None, :])
            # import matplotlib.pyplot as plt
            # plt.plot(xx, pvals)
            # plt.plot(xx, mvals, '--')
            # plt.show()
            assert np.allclose(mvals, pvals - poly.mean())
            assert np.allclose(poly_ii.variance() / poly.variance(),
                               pce_main_effects[ii])
            poly_ii.coefficients /= np.sqrt(poly.variance())
            assert np.allclose(poly_ii.variance(), pce_main_effects[ii])

            # Marginalize out 1 variable
            xx = pya.cartesian_product([xx] * 2)
            inactive_idx = np.array([ii])
            marginalized_pce = pya.marginalize_polynomial_chaos_expansion(
                poly, inactive_idx, center=True)
            mvals = marginalized_pce(xx)
            variable_ii = variable.all_variables()[:ii] +\
                variable.all_variables()[ii+1:]
            var_trans_ii = pya.AffineRandomVariableTransformation(variable_ii)
            poly_ii = pya.PolynomialChaosExpansion()
            poly_opts_ii = \
                pya.define_poly_options_from_variable_transformation(
                    var_trans_ii)
            poly_ii.configure(poly_opts_ii)
            indices_ii = pya.compute_hyperbolic_indices(2, degree, 1.)
            poly_ii.set_indices(indices_ii)
            poly_ii.set_coefficients(np.ones((indices_ii.shape[1], 1)))
            pvals = poly_ii(xx)
            assert np.allclose(mvals, pvals - poly.mean())
Example #12
#%%
# Here we have intentionally set the coefficients :math:`c` of the Genz function to be highly anisotropic, to emphasize the properties of the adaptive algorithm.
#
# A PCE represents the model output :math:`f(\V{\rv})` as an expansion in orthonormal polynomials,
#
# .. math::
#
#   \begin{align*}
#   f(\V{\rv}) &\approx f_N(\V{\rv}) = \sum_{\lambda\in\Lambda}\alpha_{\lambda}\phi_{\lambda}(\V{\rv}), & |\Lambda| &= N.
#   \end{align*}
#
# where :math:`\lambda=(\lambda_1,\ldots,\lambda_d)\in\mathbb{N}_0^d` is a multi-index and :math:`\Lambda` specifies the terms included in the expansion. In :ref:`Polynomial Chaos Regression` we set :math:`\Lambda` to be a total-degree expansion. This choice was somewhat arbitrary. The exact indices in :math:`\Lambda` should be chosen with more care. The number of terms in a PCE dictates how many samples are needed to accurately compute the coefficients of the expansion. Consequently we should choose the index set :math:`\Lambda` in a way that minimizes error for a fixed computational budget. In this tutorial we use an adaptive algorithm to construct an index set that greedily minimizes the error in the PCE. Before starting the adaptive algorithm we must first define the PCE.
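
#%%
# As a concrete illustration of the index sets above: for :math:`d=2` the
# total-degree set of degree 2 contains the :math:`\binom{2+2}{2}=6`
# multi-indices :math:`(0,0),(1,0),(0,1),(2,0),(1,1),(0,2)`. The sketch below
# (not part of the adaptive workflow) generates this set with
# ``compute_hyperbolic_indices``; a final argument of 1.0 recovers the
# total-degree set.

example_indices = pya.compute_hyperbolic_indices(2, 2, 1.0)
print(example_indices)  # each column is one multi-index (lambda_1, lambda_2)

#%%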

var_trans = pya.AffineRandomVariableTransformation(variable)
poly = pya.PolynomialChaosExpansion()
poly_opts = pya.define_poly_options_from_variable_transformation(var_trans)
poly.configure(poly_opts)

#%%
# Next we will generate some test data to estimate the error in the PCE as the adaptive algorithm evolves. We will compute the error at each step using a callback function.

validation_samples = pya.generate_independent_random_samples(
    var_trans.variable, int(1e3))
validation_values = model(validation_samples)

errors = []
num_samples = []


def callback(pce):
    error = compute_l2_error(validation_samples, validation_values, pce)
    errors.append(error)
    # record the size of the adaptive training set; assumes the pce object
    # exposes its current samples via pce.samples
    num_samples.append(pce.samples.shape[1])