Example #1
0
    def test_pce_sensitivities_of_sobol_g_function(self):
        """Check PCE-based Sobol indices against the analytic values of
        the Sobol G function.

        A tensor-product Legendre PCE is fit with weighted least squares
        using samples drawn from the arcsine (Chebyshev) density mapped
        to [0, 1], the usual importance distribution for Christoffel-type
        preconditioning of uniform variables.
        """
        nsamples = 2000
        nvars, degree = 3, 8
        a = np.array([1, 2, 5])[:nvars]
        univariate_variables = [uniform(0, 1)] * nvars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)

        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)
        indices = pya.tensor_product_indices([degree] * nvars)
        poly.set_indices(indices)

        # Draw samples from the arcsine density on [0, 1]. (The original
        # code first drew uniform samples and immediately overwrote
        # them; that dead draw has been removed -- no seed is set, so
        # the test cannot depend on the exact RNG stream.)
        samples = (np.cos(np.random.uniform(0, np.pi,
                                            (nvars, nsamples))) + 1) / 2
        values = sobol_g_function(a, samples)

        # Christoffel-weighted least-squares fit of the PCE coefficients.
        basis_matrix = poly.basis_matrix(samples)
        weights = 1 / np.sum(basis_matrix**2, axis=1)[:, np.newaxis]
        coef = np.linalg.lstsq(basis_matrix * weights,
                               values * weights,
                               rcond=None)[0]
        poly.set_coefficients(coef)

        # Relative L2 error on an independent validation set
        # (informational only; the assertions below are on statistics).
        nvalidation_samples = 1000
        validation_samples = pya.generate_independent_random_samples(
            var_trans.variable, nvalidation_samples)
        validation_values = sobol_g_function(a, validation_samples)

        poly_validation_vals = poly(validation_samples)
        rel_error = np.linalg.norm(poly_validation_vals - validation_values
                                   ) / np.linalg.norm(validation_values)
        print('Rel. Error', rel_error)

        # Compare PCE-derived sensitivity indices with analytic values.
        pce_main_effects, pce_total_effects =\
            pya.get_main_and_total_effect_indices_from_pce(
                poly.get_coefficients(), poly.get_indices())
        interaction_terms, pce_sobol_indices = get_sobol_indices(
            poly.get_coefficients(), poly.get_indices(), max_order=3)

        mean, variance, main_effects, total_effects, sobol_indices = \
            get_sobol_g_function_statistics(a, interaction_terms)
        assert np.allclose(poly.mean(), mean, atol=1e-2)
        assert np.allclose(poly.variance(), variance, atol=1e-2)
        assert np.allclose(pce_main_effects, main_effects, atol=1e-2)
        assert np.allclose(pce_total_effects, total_effects, atol=1e-2)
        assert np.allclose(pce_sobol_indices, sobol_indices, atol=1e-2)
    def help_cross_validate_pce_degree(self, solver_type, solver_options):
        """Fit a known cubic PCE via approximate() and
        cross_validate_pce_degree() and check both recover it, including
        the per-QoI best degree."""
        print(solver_type, solver_options)
        nvars = 2
        marginals = [stats.uniform(-1, 2)] * nvars
        variable = pya.IndependentMultivariateRandomVariable(marginals)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        true_poly = pya.PolynomialChaosExpansion()
        true_poly.configure(
            pya.define_poly_options_from_variable_transformation(var_trans))

        degree = 3
        true_poly.set_indices(
            pya.compute_hyperbolic_indices(nvars, degree, 1.0))
        # factor of 2 does not pass test but 2.2 does
        nsamples = int(true_poly.num_terms() * 2.2)
        coef = np.random.normal(0, 1, (true_poly.indices.shape[1], 2))
        # for first qoi make degree 2 the best degree
        coef[pya.nchoosek(nvars + 2, 2):, 0] = 0
        true_poly.set_coefficients(coef)

        train_samples = pya.generate_independent_random_samples(
            variable, nsamples)
        train_vals = true_poly(train_samples)

        opts = {
            "basis_type": "hyperbolic_cross",
            "variable": variable,
            "options": {
                "verbose": 3,
                "solver_type": solver_type,
                "min_degree": 1,
                "max_degree": degree + 1,
                "linear_solver_options": solver_options
            }
        }
        approx = approximate(
            train_samples, train_vals, "polynomial_chaos", opts).approx

        # The recovered PCE must interpolate the truth on fresh samples.
        nvalidation_samples = 10
        validation_samples = pya.generate_independent_random_samples(
            variable, nvalidation_samples)
        assert np.allclose(approx(validation_samples),
                           true_poly(validation_samples))

        poly = copy.deepcopy(true_poly)
        approx_res = cross_validate_pce_degree(
            poly, train_samples, train_vals, 1, degree + 1,
            solver_type=solver_type,
            linear_solver_options=solver_options)
        # QoI 0 was truncated to degree 2; QoI 1 is the full cubic.
        assert np.allclose(approx_res.degrees, [2, 3])
    def test_approximate_fixed_pce(self):
        """Check cross_validate_approximation() with a fixed basis
        reproduces LinearLeastSquaresCV scores for each QoI."""
        nvars = 2
        marginals = [stats.uniform(-1, 2)] * nvars
        variable = pya.IndependentMultivariateRandomVariable(marginals)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly.configure(
            pya.define_poly_options_from_variable_transformation(var_trans))

        degree, hcross_strength = 7, 0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(nvars, degree, hcross_strength))
        nsamples = poly.num_terms() * 2
        # Decaying coefficients so high-degree terms contribute less.
        degrees = poly.indices.sum(axis=0)
        coef = np.random.normal(
            0, 1, (poly.indices.shape[1], 2)) / (degrees[:, np.newaxis] + 1)**2
        # set some coefficients to zero to make sure that different qoi
        # are treated correctly.
        for qoi in range(2):
            zero_rows = np.random.permutation(
                coef.shape[0])[:coef.shape[0] // 2]
            coef[zero_rows, qoi] = 0
        poly.set_coefficients(coef)
        train_samples = pya.generate_independent_random_samples(
            variable, nsamples)
        train_vals = poly(train_samples)

        indices = pya.compute_hyperbolic_indices(nvars, 1, 1)
        nfolds = 10
        options = {
            "basis_type": "fixed",
            "variable": variable,
            "options": {
                "linear_solver_options": {},
                "indices": indices,
                "solver_type": "lstsq"
            }
        }
        approx_list, residues_list, cv_score = \
            cross_validate_approximation(
                train_samples, train_vals, options, nfolds,
                "polynomial_chaos", random_folds=False)

        # With identical (non-random) folds the per-QoI CV scores must
        # match a direct LinearLeastSquaresCV fit.
        solver = LinearLeastSquaresCV(cv=nfolds, random_folds=False)
        poly.set_indices(indices)
        basis_matrix = poly.basis_matrix(train_samples)
        for qoi in range(2):
            solver.fit(basis_matrix, train_vals[:, qoi:qoi + 1])
            assert np.allclose(solver.cv_score_, cv_score[qoi])
    def test_pce_basis_expansion(self):
        """Recover a sparse two-QoI PCE with the expanding-basis solver.

        A 'true' PCE with decaying, partially zeroed coefficients is
        sampled to build training data; approximate() with the
        expanding-basis strategy must reproduce it to tight tolerance.
        """
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree, hcross_strength = 7, 0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(num_vars, degree, hcross_strength))
        num_samples = poly.num_terms() * 2
        # Decaying coefficients so high-degree terms contribute less.
        degrees = poly.indices.sum(axis=0)
        coef = np.random.normal(
            0, 1, (poly.indices.shape[1], 2)) / (degrees[:, np.newaxis] + 1)**2
        # set some coefficients to zero to make sure that different qoi
        # are treated correctly.
        II = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[II, 0] = 0
        II = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[II, 1] = 0
        poly.set_coefficients(coef)
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        true_poly = poly

        poly = approximate(
            train_samples, train_vals, "polynomial_chaos", {
                "basis_type": "expanding_basis",
                "variable": variable,
                "options": {
                    "max_num_expansion_steps_iter": 1,
                    "verbose": 3,
                    "max_num_terms": 1000,
                    "max_num_step_increases": 2,
                    "max_num_init_terms": 33
                }
            }).approx

        # Validate on the training samples. (The original drew a fresh
        # validation set and immediately discarded it; that dead draw
        # has been removed, and the RMS error is now normalized by the
        # actual number of validation points instead of a stale count.)
        validation_samples = train_samples
        error = np.linalg.norm(
            poly(validation_samples) -
            true_poly(validation_samples)) / np.sqrt(
                validation_samples.shape[1])
        assert np.allclose(poly(validation_samples),
                           true_poly(validation_samples),
                           atol=1e-8), error
Example #5
0
    def __init__(self, mesh_dof=100, num_terms=35):
        """Build the spatial mesh and configure a univariate Legendre
        PCE basis with num_terms terms on [-1, 1]."""
        self.mesh = np.linspace(-1., 1., mesh_dof)
        self.num_terms = num_terms

        # One uniform variable on [-1, 1] induces a Legendre basis.
        var_trans = pya.AffineRandomVariableTransformation([uniform(-1, 2)])
        self.poly = pya.PolynomialChaosExpansion()
        self.poly.configure(
            pya.define_poly_options_from_variable_transformation(var_trans))
        # Degree num_terms - 1 in 1D gives exactly num_terms basis terms.
        self.poly.set_indices(
            pya.compute_hyperbolic_indices(1, self.num_terms - 1))
Example #6
0
 def get_quadrature_rule(self):
     """Return tensor-product quadrature points and weights mapped to
     the user variable space."""
     nvars = self.variable.num_vars()
     degrees = [10] * nvars
     var_trans = pya.AffineRandomVariableTransformation(self.variable)
     gauss_legendre = partial(
         pya.gauss_jacobi_pts_wts_1D, alpha_poly=0, beta_poly=0)
     # NOTE(review): the rule list is hard coded for 5 variables
     # (2 uniform, 3 Gaussian) -- confirm it matches self.variable.
     univariate_quadrature_rules = [
         gauss_legendre, gauss_legendre, pya.gauss_hermite_pts_wts_1D,
         pya.gauss_hermite_pts_wts_1D, pya.gauss_hermite_pts_wts_1D]
     x, w = pya.get_tensor_product_quadrature_rule(
         degrees, self.variable.num_vars(), univariate_quadrature_rules,
         var_trans.map_from_canonical_space)
     return x, w
    def test_approximate_neural_network(self):
        """Fit a small sigmoid network to the Ishigami benchmark and
        check the relative L2 error stays below 6e-2."""
        np.random.seed(2)
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        nvars = benchmark.variable.num_vars()
        nqoi = 1
        maxiter = 30000
        print(benchmark.variable)
        var_trans = pya.AffineRandomVariableTransformation(benchmark.variable)
        network_opts = {
            "activation_func": "sigmoid",
            "layers": [nvars, 75, nqoi],
            "loss_func": "squared_loss",
            "var_trans": var_trans,
            "lag_mult": 0
        }
        optimizer_opts = {
            "method": "L-BFGS-B",
            "options": {
                "maxiter": maxiter,
                "iprint": -1,
                "gtol": 1e-6
            }
        }
        ntrain_samples = 500
        # NOTE(review): the first draw below is discarded and only
        # advances the seeded RNG; it is kept deliberately so the
        # Chebyshev-density training samples (and hence the final error)
        # are identical to the original test.
        train_samples = pya.generate_independent_random_samples(
            var_trans.variable, ntrain_samples)
        train_samples = var_trans.map_from_canonical_space(
            np.cos(np.random.uniform(0, np.pi, (nvars, ntrain_samples))))
        train_vals = benchmark.fun(train_samples)

        # The original built an identical opts dict without "x0" that was
        # immediately overwritten unused; only the used version is kept.
        opts = {
            "network_opts": network_opts,
            "verbosity": 3,
            "optimizer_opts": optimizer_opts,
            "x0": 10
        }
        approx = approximate(train_samples, train_vals, "neural_network",
                             opts).approx
        nsamples = 100
        error = compute_l2_error(approx, benchmark.fun, var_trans.variable,
                                 nsamples)
        print(error)
        assert error < 6e-2
Example #8
0
    def test_pce_sensitivities_of_ishigami_function(self):
        """Check PCE-based Sobol indices against the analytic statistics
        of the Ishigami function."""
        nsamples = 1500
        nvars, degree = 3, 18
        univariate_variables = [uniform(-np.pi, 2 * np.pi)] * nvars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)

        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly.configure(
            pya.define_poly_options_from_variable_transformation(var_trans))
        poly.set_indices(pya.compute_hyperbolic_indices(nvars, degree, 1.0))

        # Unweighted least-squares fit of the PCE coefficients.
        samples = pya.generate_independent_random_samples(
            var_trans.variable, nsamples)
        values = ishigami_function(samples)
        coef = np.linalg.lstsq(
            poly.basis_matrix(samples), values, rcond=None)[0]
        poly.set_coefficients(coef)

        # RMS error on an independent validation set (informational only).
        nvalidation_samples = 1000
        validation_samples = pya.generate_independent_random_samples(
            var_trans.variable, nvalidation_samples)
        validation_values = ishigami_function(validation_samples)
        abs_error = np.linalg.norm(
            poly(validation_samples) - validation_values) / np.sqrt(
                nvalidation_samples)

        pce_main_effects, pce_total_effects =\
            pya.get_main_and_total_effect_indices_from_pce(
                poly.get_coefficients(), poly.get_indices())

        mean, variance, main_effects, total_effects, sobol_indices, \
            sobol_interaction_indices = get_ishigami_funciton_statistics()
        assert np.allclose(poly.mean(), mean)
        assert np.allclose(poly.variance(), variance)
        assert np.allclose(pce_main_effects, main_effects)
        assert np.allclose(pce_total_effects, total_effects)

        interaction_terms, pce_sobol_indices = get_sobol_indices(
            poly.get_coefficients(), poly.get_indices(), max_order=3)
        assert np.allclose(pce_sobol_indices, sobol_indices)
 def test_approximate_polynomial_chaos_custom_poly_type(self):
     """Check that an explicit poly_types option overrides the basis
     that would otherwise be inferred from the variable."""
     benchmark = setup_benchmark("ishigami", a=7, b=0.1)
     nvars = benchmark.variable.num_vars()
     # this test purposefully selects the wrong variable to make sure
     # the poly_type override is activated
     univariate_variables = [stats.beta(5, 5, -np.pi, 2 * np.pi)] * nvars
     variable = pya.IndependentMultivariateRandomVariable(
         univariate_variables)
     var_trans = pya.AffineRandomVariableTransformation(variable)
     # specify correct basis so it is not chosen from var_trans.variable
     poly_opts = {"var_trans": var_trans}
     # but rather from another variable which will invoke Legendre polys
     poly_opts["poly_types"] = pya.define_poly_options_from_variable(
         pya.IndependentMultivariateRandomVariable(
             [stats.uniform()] * nvars))
     options = {
         "poly_opts": poly_opts,
         "variable": variable,
         "options": {"max_num_step_increases": 1}
     }
     ntrain_samples = 400
     train_samples = np.random.uniform(
         -np.pi, np.pi, (nvars, ntrain_samples))
     train_vals = benchmark.fun(train_samples)
     approx = approximate(
         train_samples, train_vals, method="polynomial_chaos",
         options=options).approx
     nsamples = 100
     error = compute_l2_error(
         approx, benchmark.fun, approx.var_trans.variable, nsamples,
         rel=True)
     assert error < 1e-4
     assert np.allclose(approx.mean(), benchmark.mean, atol=error)
Example #10
0
    def test_pce_basis_expansion(self):
        """Recover a single-QoI PCE with decaying coefficients using the
        expanding-basis solver."""
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree, hcross_strength = 7, 0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(num_vars, degree, hcross_strength))
        num_samples = poly.num_terms() * 2
        # Decaying coefficients so high-degree terms contribute less.
        degrees = poly.indices.sum(axis=0)
        poly.set_coefficients((np.random.normal(0, 1, poly.indices.shape[1]) /
                               (degrees + 1)**2)[:, np.newaxis])
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        true_poly = poly

        # NOTE(review): unlike other tests in this file the result of
        # approximate() is used directly rather than via its .approx
        # attribute -- presumably an older pyapprox API; confirm against
        # the installed version.
        poly = approximate(train_samples, train_vals, 'polynomial_chaos', {
            'basis_type': 'expanding_basis',
            'variable': variable
        })

        # Validate on the training samples. (The original drew a fresh
        # validation set and immediately discarded it; that dead draw
        # has been removed, and the RMS error is now normalized by the
        # actual number of validation points instead of a stale count.)
        validation_samples = train_samples
        error = np.linalg.norm(
            poly(validation_samples) -
            true_poly(validation_samples)) / np.sqrt(
                validation_samples.shape[1])
        assert np.allclose(
            poly(validation_samples), true_poly(validation_samples),
            atol=1e-8), error
Example #11
0
    def test_cross_validate_pce_degree(self):
        """Check cross validation recovers the true degree of a cubic
        polynomial."""
        nvars = 2
        marginals = [stats.uniform(-1, 2)] * nvars
        variable = pya.IndependentMultivariateRandomVariable(marginals)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        true_poly = pya.PolynomialChaosExpansion()
        true_poly.configure(
            pya.define_poly_options_from_variable_transformation(var_trans))

        degree = 3
        true_poly.set_indices(
            pya.compute_hyperbolic_indices(nvars, degree, 1.0))
        nsamples = true_poly.num_terms() * 2
        true_poly.set_coefficients(
            np.random.normal(0, 1, (true_poly.indices.shape[1], 1)))
        train_samples = pya.generate_independent_random_samples(
            variable, nsamples)
        train_vals = true_poly(train_samples)

        poly = approximate(train_samples, train_vals, 'polynomial_chaos', {
            'basis_type': 'hyperbolic_cross',
            'variable': variable
        })

        # The recovered PCE must interpolate the truth on fresh samples.
        nvalidation_samples = 10
        validation_samples = pya.generate_independent_random_samples(
            variable, nvalidation_samples)
        assert np.allclose(poly(validation_samples),
                           true_poly(validation_samples))

        # Cross validation over degrees 1..degree+2 must pick the truth.
        poly = copy.deepcopy(true_poly)
        poly, best_degree = cross_validate_pce_degree(
            poly, train_samples, train_vals, 1, degree + 2)
        assert best_degree == degree
    def test_cross_validate_approximation_after_regularization_selection(self):
        """
        This test is useful as it shows how to use cross_validate_approximation
        to produce a list of approximations on each cross validation fold
        once regularization parameters have been chosen.
        These can be used to show variance in predictions of values,
        sensitivity indices, etc.

        Ideally this could be avoided if sklearn stored the coefficients
        and alphas for each fold and then we can just find the coefficients
        that correspond to the first time the path drops below the best_alpha
        """
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        # Build a sparse "true" two-QoI PCE with decaying coefficients.
        degree, hcross_strength = 7, 0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(num_vars, degree, hcross_strength))
        num_samples = poly.num_terms() * 2
        degrees = poly.indices.sum(axis=0)
        coef = np.random.normal(
            0, 1, (poly.indices.shape[1], 2)) / (degrees[:, np.newaxis] + 1)**2
        # set some coefficients to zero to make sure that different qoi
        # are treated correctly.
        II = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[II, 0] = 0
        II = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[II, 1] = 0
        poly.set_coefficients(coef)
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)

        # Stage 1: select regularization via the expanding-basis solver.
        result = approximate(train_samples, train_vals, "polynomial_chaos", {
            "basis_type": "expanding_basis",
            "variable": variable
        })

        # Even with the same folds, iterative methods such as Lars, LarsLasso
        # and OMP will not have cv_score from approximate and cross validate
        # approximation exactly the same because iterative methods interpolate
        # residuals to compute cross validation scores
        nfolds = 10
        # Stage 2: refit per fold with the per-QoI alphas chosen above.
        linear_solver_options = [{
            "alpha": result.reg_params[0]
        }, {
            "alpha": result.reg_params[1]
        }]
        # Restrict each QoI to the basis terms with nonzero coefficients.
        indices = [
            result.approx.indices[:, np.where(np.absolute(c) > 0)[0]]
            for c in result.approx.coefficients.T
        ]
        options = {
            "basis_type": "fixed",
            "variable": variable,
            "options": {
                "linear_solver_options": linear_solver_options,
                "indices": indices
            }
        }
        approx_list, residues_list, cv_score = \
            cross_validate_approximation(
                train_samples, train_vals, options, nfolds, "polynomial_chaos",
                random_folds="sklearn")

        # Both stages should achieve near machine-precision CV scores.
        assert (np.all(cv_score < 6e-14) and np.all(result.scores < 4e-13))
    def test_marginalize_polynomial_chaos_expansions(self):
        """Check marginalize_polynomial_chaos_expansion against PCEs
        built directly on the retained variable(s).

        All coefficients are set to one, so a centered marginal PCE must
        equal the directly constructed unit-coefficient PCE minus the
        full PCE's mean.
        """
        univariate_variables = [uniform(-1, 2), norm(0, 1), uniform(-1, 2)]
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        num_vars = len(univariate_variables)

        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree = 2
        indices = pya.compute_hyperbolic_indices(num_vars, degree, 1)
        poly.set_indices(indices)
        # Unit coefficients make the expected marginals easy to rebuild.
        poly.set_coefficients(np.ones((indices.shape[1], 1)))

        pce_main_effects, pce_total_effects =\
            pya.get_main_and_total_effect_indices_from_pce(
                poly.get_coefficients(), poly.get_indices())
        print(poly.num_terms())

        for ii in range(num_vars):
            # Marginalize out 2 variables
            xx = np.linspace(-1, 1, 101)
            inactive_idx = np.hstack(
                (np.arange(ii), np.arange(ii + 1, num_vars)))
            marginalized_pce = pya.marginalize_polynomial_chaos_expansion(
                poly, inactive_idx, center=True)
            mvals = marginalized_pce(xx[None, :])
            # Reference: univariate PCE on variable ii, unit coefficients.
            variable_ii = variable.all_variables()[ii:ii + 1]
            var_trans_ii = pya.AffineRandomVariableTransformation(variable_ii)
            poly_ii = pya.PolynomialChaosExpansion()
            poly_opts_ii = \
                pya.define_poly_options_from_variable_transformation(
                    var_trans_ii)
            poly_ii.configure(poly_opts_ii)
            # NOTE(review): bare compute_hyperbolic_indices here vs the
            # pya.-qualified call below -- presumably the same function
            # imported two ways; confirm.
            indices_ii = compute_hyperbolic_indices(1, degree, 1.)
            poly_ii.set_indices(indices_ii)
            poly_ii.set_coefficients(np.ones((indices_ii.shape[1], 1)))
            pvals = poly_ii(xx[None, :])
            # Centered marginalization subtracts the full PCE's mean.
            assert np.allclose(mvals, pvals - poly.mean())
            assert np.allclose(poly_ii.variance() / poly.variance(),
                               pce_main_effects[ii])
            # After rescaling by the total std the marginal variance
            # equals the main-effect index directly.
            poly_ii.coefficients /= np.sqrt(poly.variance())
            assert np.allclose(poly_ii.variance(), pce_main_effects[ii])

            # Marginalize out 1 variable
            xx = pya.cartesian_product([xx] * 2)
            inactive_idx = np.array([ii])
            marginalized_pce = pya.marginalize_polynomial_chaos_expansion(
                poly, inactive_idx, center=True)
            mvals = marginalized_pce(xx)
            # Reference: bivariate PCE on the two remaining variables.
            variable_ii = variable.all_variables()[:ii] +\
                variable.all_variables()[ii+1:]
            var_trans_ii = pya.AffineRandomVariableTransformation(variable_ii)
            poly_ii = pya.PolynomialChaosExpansion()
            poly_opts_ii = \
                pya.define_poly_options_from_variable_transformation(
                    var_trans_ii)
            poly_ii.configure(poly_opts_ii)
            indices_ii = pya.compute_hyperbolic_indices(2, degree, 1.)
            poly_ii.set_indices(indices_ii)
            poly_ii.set_coefficients(np.ones((indices_ii.shape[1], 1)))
            pvals = poly_ii(xx)
            assert np.allclose(mvals, pvals - poly.mean())
# %%
# Here we have intentionally set the coefficients :math:`c`: of the Genz function to be highly anisotropic, to emphasize the properties of the adaptive algorithm.
#
# PCE represent the model output :math:`f(\V{\rv})` as an expansion in orthonormal polynomials,
#
# .. math::
#
#   \begin{align*}
#   f(\V{\rv}) &\approx f_N(\V{\rv}) = \sum_{\lambda\in\Lambda}\alpha_{\lambda}\phi_{\lambda}(\V{\rv}), & |\Lambda| &= N.
#   \end{align*}
#
# where :math:`\lambda=(\lambda_1,\ldots,\lambda_d)\in\mathbb{N}_0^d` is a multi-index and :math:`\Lambda` specifies the terms included in the expansion. In :ref:`Polynomial Chaos Regression` we set :math:`\Lambda` to be a total degree expansion. This choice was somewhat arbitrary. The exact indices in :math:`\Lambda` should be chosen with more care. The number of terms in a PCE dictates how many samples are needed to accurately compute the coefficients of the expansion. Consequently we should choose the index set :math:`\Lambda` in a way that minimizes error for a fixed computational budget. In this tutorial we use an adaptive algorithm to construct an index set that greedily minimizes the error in the PCE.
#
# Before starting the adaptive algorithm we will generate some test data to estimate the error in the PCE as the adaptive algorithm evolves. We will compute the error at each step using a callback function.

# Build the validation set used by callback() below to track the adaptive
# PCE's generalization error. `variable` and `model` are defined earlier
# in the tutorial (outside this excerpt).
var_trans = pya.AffineRandomVariableTransformation(variable)
validation_samples = pya.generate_independent_random_samples(
    var_trans.variable, int(1e3))
validation_values = model(validation_samples)

# Convergence history populated by callback() at each adaptive step.
errors = []
num_samples = []


def callback(pce):
    """Record the current L2 error and sample count of the adaptive PCE.

    Appends to the module-level `errors` and `num_samples` lists.
    NOTE(review): this compute_l2_error signature (samples, values, pce)
    differs from the (approx, fun, variable, nsamples) form used in the
    test code above -- confirm which overload is in scope here.
    """
    errors.append(
        compute_l2_error(validation_samples, validation_values, pce))
    num_samples.append(pce.samples.shape[1])


# %%