Example #1
    def test_pce_sensitivities_of_sobol_g_function(self):
        nsamples = 2000
        nvars, degree = 3, 8
        a = np.array([1, 2, 5])[:nvars]
        univariate_variables = [uniform(0, 1)] * nvars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)

        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)
        indices = pya.tensor_product_indices([degree] * nvars)
        poly.set_indices(indices)
        #print('No. PCE Terms',indices.shape[1])

        samples = pya.generate_independent_random_samples(
            var_trans.variable, nsamples)
        samples = (np.cos(np.random.uniform(0, np.pi,
                                            (nvars, nsamples))) + 1) / 2
        values = sobol_g_function(a, samples)

        basis_matrix = poly.basis_matrix(samples)
        weights = 1 / np.sum(basis_matrix**2, axis=1)[:, np.newaxis]
        coef = np.linalg.lstsq(basis_matrix * weights,
                               values * weights,
                               rcond=None)[0]
        poly.set_coefficients(coef)

        nvalidation_samples = 1000
        validation_samples = pya.generate_independent_random_samples(
            var_trans.variable, nvalidation_samples)
        validation_values = sobol_g_function(a, validation_samples)

        poly_validation_vals = poly(validation_samples)
        rel_error = np.linalg.norm(poly_validation_vals - validation_values
                                   ) / np.linalg.norm(validation_values)
        print('Rel. Error', rel_error)

        pce_main_effects, pce_total_effects =\
            pya.get_main_and_total_effect_indices_from_pce(
                poly.get_coefficients(), poly.get_indices())
        interaction_terms, pce_sobol_indices = get_sobol_indices(
            poly.get_coefficients(), poly.get_indices(), max_order=3)

        mean, variance, main_effects, total_effects, sobol_indices = \
            get_sobol_g_function_statistics(a, interaction_terms)
        assert np.allclose(poly.mean(), mean, atol=1e-2)
        # print((poly.variance(),variance))
        assert np.allclose(poly.variance(), variance, atol=1e-2)
        # print(pce_main_effects,main_effects)
        assert np.allclose(pce_main_effects, main_effects, atol=1e-2)
        # print(pce_total_effects,total_effects)
        assert np.allclose(pce_total_effects, total_effects, atol=1e-2)
        assert np.allclose(pce_sobol_indices, sobol_indices, atol=1e-2)
Example #2
    def help_cross_validate_pce_degree(self, solver_type, solver_options):
        print(solver_type, solver_options)
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree = 3
        poly.set_indices(pya.compute_hyperbolic_indices(num_vars, degree, 1.0))
        # factor of 2 does not pass test but 2.2 does
        num_samples = int(poly.num_terms() * 2.2)
        coef = np.random.normal(0, 1, (poly.indices.shape[1], 2))
        coef[pya.nchoosek(num_vars + 2, 2):, 0] = 0
        # for first qoi make degree 2 the best degree
        poly.set_coefficients(coef)

        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        true_poly = poly

        poly = approximate(
            train_samples, train_vals, "polynomial_chaos", {
                "basis_type": "hyperbolic_cross",
                "variable": variable,
                "options": {
                    "verbose": 3,
                    "solver_type": solver_type,
                    "min_degree": 1,
                    "max_degree": degree + 1,
                    "linear_solver_options": solver_options
                }
            }).approx

        num_validation_samples = 10
        validation_samples = pya.generate_independent_random_samples(
            variable, num_validation_samples)
        assert np.allclose(poly(validation_samples),
                           true_poly(validation_samples))

        poly = copy.deepcopy(true_poly)
        approx_res = cross_validate_pce_degree(
            poly,
            train_samples,
            train_vals,
            1,
            degree + 1,
            solver_type=solver_type,
            linear_solver_options=solver_options)
        assert np.allclose(approx_res.degrees, [2, 3])
Example #3
    def test_pce_basis_expansion(self):
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree, hcross_strength = 7, 0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(num_vars, degree, hcross_strength))
        num_samples = poly.num_terms() * 2
        degrees = poly.indices.sum(axis=0)
        coef = np.random.normal(
            0, 1, (poly.indices.shape[1], 2)) / (degrees[:, np.newaxis] + 1)**2
        # set some coefficients to zero to make sure that different qoi
        # are treated correctly.
        II = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[II, 0] = 0
        II = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[II, 1] = 0
        poly.set_coefficients(coef)
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        true_poly = poly

        poly = approximate(
            train_samples, train_vals, "polynomial_chaos", {
                "basis_type": "expanding_basis",
                "variable": variable,
                "options": {
                    "max_num_expansion_steps_iter": 1,
                    "verbose": 3,
                    "max_num_terms": 1000,
                    "max_num_step_increases": 2,
                    "max_num_init_terms": 33
                }
            }).approx

        num_validation_samples = 100
        validation_samples = pya.generate_independent_random_samples(
            variable, num_validation_samples)
        validation_samples = train_samples
        error = np.linalg.norm(
            poly(validation_samples) -
            true_poly(validation_samples)) / np.sqrt(num_validation_samples)
        assert np.allclose(poly(validation_samples),
                           true_poly(validation_samples),
                           atol=1e-8), error
Example #4
    def test_piston_gradient(self):
        benchmark = setup_benchmark("piston")
        sample = pya.generate_independent_random_samples(benchmark.variable, 1)
        print(benchmark.jac(sample))
        errors = pya.check_gradients(benchmark.fun, benchmark.jac, sample)
        errors = errors[np.isfinite(errors)]
        assert errors.max() > 0.1 and errors.min() <= 6e-7
Example #5
    def test_approximate_gaussian_process(self):
        from sklearn.gaussian_process.kernels import Matern
        num_vars = 1
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        num_samples = 100
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)

        # Generate random function
        nu = np.inf  # 2.5
        kernel = Matern(0.5, nu=nu)
        X = np.linspace(-1, 1, 1000)[np.newaxis, :]
        alpha = np.random.normal(0, 1, X.shape[1])
        train_vals = kernel(train_samples.T, X.T).dot(alpha)[:, np.newaxis]

        gp = approximate(train_samples, train_vals, "gaussian_process", {
            "nu": nu,
            "noise_level": 1e-8
        }).approx

        error = np.linalg.norm(gp(X)[:, 0]-kernel(X.T, X.T).dot(alpha)) /\
            np.sqrt(X.shape[1])
        assert error < 1e-5
Example #6
    def test_pce_sensitivities_of_ishigami_function(self):
        nsamples = 1500
        nvars, degree = 3, 18
        univariate_variables = [uniform(-np.pi, 2 * np.pi)] * nvars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)

        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)
        indices = pya.compute_hyperbolic_indices(nvars, degree, 1.0)
        poly.set_indices(indices)
        #print('No. PCE Terms',indices.shape[1])

        samples = pya.generate_independent_random_samples(
            var_trans.variable, nsamples)
        values = ishigami_function(samples)

        basis_matrix = poly.basis_matrix(samples)
        coef = np.linalg.lstsq(basis_matrix, values, rcond=None)[0]
        poly.set_coefficients(coef)

        nvalidation_samples = 1000
        validation_samples = pya.generate_independent_random_samples(
            var_trans.variable, nvalidation_samples)
        validation_values = ishigami_function(validation_samples)
        poly_validation_vals = poly(validation_samples)
        abs_error = np.linalg.norm(poly_validation_vals - validation_values
                                   ) / np.sqrt(nvalidation_samples)
        #print('Abs. Error',abs_error)

        pce_main_effects, pce_total_effects =\
            pya.get_main_and_total_effect_indices_from_pce(
                poly.get_coefficients(), poly.get_indices())

        mean, variance, main_effects, total_effects, sobol_indices, \
            sobol_interaction_indices = get_ishigami_funciton_statistics()
        assert np.allclose(poly.mean(), mean)
        assert np.allclose(poly.variance(), variance)
        assert np.allclose(pce_main_effects, main_effects)
        assert np.allclose(pce_total_effects, total_effects)

        interaction_terms, pce_sobol_indices = get_sobol_indices(
            poly.get_coefficients(), poly.get_indices(), max_order=3)
        assert np.allclose(pce_sobol_indices, sobol_indices)
Example #7
    def test_wing_weight_gradient(self):
        variable = define_wing_weight_random_variables()
        fun = wing_weight_function
        grad = wing_weight_gradient
        sample = pya.generate_independent_random_samples(variable, 1)
        errors = pya.check_gradients(fun, grad, sample)
        errors = errors[np.isfinite(errors)]
        assert errors.max() > 0.1 and errors.min() <= 6e-7
Example #8
    def test_cantilever_beam_gradients(self):
        benchmark = setup_benchmark('cantilever_beam')
        from pyapprox.models.wrappers import ActiveSetVariableModel
        fun = ActiveSetVariableModel(
            benchmark.fun,
            benchmark.variable.num_vars() +
            benchmark.design_variable.num_vars(),
            benchmark.variable.get_statistics('mean'),
            benchmark.design_var_indices)
        jac = ActiveSetVariableModel(
            benchmark.jac,
            benchmark.variable.num_vars() +
            benchmark.design_variable.num_vars(),
            benchmark.variable.get_statistics('mean'),
            benchmark.design_var_indices)
        init_guess = 2 * np.ones((2, 1))
        errors = pya.check_gradients(fun, jac, init_guess, disp=True)
        assert errors.min() < 4e-7

        constraint_fun = ActiveSetVariableModel(
            benchmark.constraint_fun,
            benchmark.variable.num_vars() +
            benchmark.design_variable.num_vars(),
            benchmark.variable.get_statistics('mean'),
            benchmark.design_var_indices)
        constraint_jac = ActiveSetVariableModel(
            benchmark.constraint_jac,
            benchmark.variable.num_vars() +
            benchmark.design_variable.num_vars(),
            benchmark.variable.get_statistics('mean'),
            benchmark.design_var_indices)
        init_guess = 2 * np.ones((2, 1))
        errors = pya.check_gradients(constraint_fun,
                                     constraint_jac,
                                     init_guess,
                                     disp=True)
        assert errors.min() < 4e-7

        nsamples = 10
        samples = pya.generate_independent_random_samples(
            benchmark.variable, nsamples)
        constraint_fun = ActiveSetVariableModel(
            benchmark.constraint_fun,
            benchmark.variable.num_vars() +
            benchmark.design_variable.num_vars(), samples,
            benchmark.design_var_indices)
        constraint_jac = ActiveSetVariableModel(
            benchmark.constraint_jac,
            benchmark.variable.num_vars() +
            benchmark.design_variable.num_vars(), samples,
            benchmark.design_var_indices)
        init_guess = 2 * np.ones((2, 1))
        errors = pya.check_gradients(
            lambda x: constraint_fun(x).flatten(order='F'),
            constraint_jac,
            init_guess,
            disp=True)
        assert errors.min() < 4e-7
Example #9
    def test_approximate_fixed_pce(self):
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree, hcross_strength = 7, 0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(num_vars, degree, hcross_strength))
        num_samples = poly.num_terms() * 2
        degrees = poly.indices.sum(axis=0)
        coef = np.random.normal(
            0, 1, (poly.indices.shape[1], 2)) / (degrees[:, np.newaxis] + 1)**2
        # set some coefficients to zero to make sure that different qoi
        # are treated correctly.
        I = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[I, 0] = 0
        I = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[I, 1] = 0
        poly.set_coefficients(coef)
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)

        indices = compute_hyperbolic_indices(num_vars, 1, 1)
        nfolds = 10
        method = 'polynomial_chaos'
        options = {
            'basis_type': 'fixed',
            'variable': variable,
            'options': {
                'linear_solver_options': {},
                'indices': indices,
                'solver_type': 'lstsq'
            }
        }
        approx_list, residues_list, cv_score = cross_validate_approximation(
            train_samples,
            train_vals,
            options,
            nfolds,
            method,
            random_folds=False)

        solver = LinearLeastSquaresCV(cv=nfolds, random_folds=False)
        poly.set_indices(indices)
        basis_matrix = poly.basis_matrix(train_samples)
        solver.fit(basis_matrix, train_vals[:, 0:1])
        assert np.allclose(solver.cv_score_, cv_score[0])

        solver.fit(basis_matrix, train_vals[:, 1:2])
        assert np.allclose(solver.cv_score_, cv_score[1])
Example #10
    def test_adaptive_approximate_gaussian_process_normalize_inputs(self):
        from sklearn.gaussian_process.kernels import Matern
        num_vars = 1
        univariate_variables = [stats.beta(5, 10, 0, 2)] * num_vars

        # Generate random function
        nu = np.inf  # 2.5
        kernel = Matern(0.1, nu=nu)
        X = np.linspace(-1, 1, 1000)[np.newaxis, :]
        alpha = np.random.normal(0, 1, X.shape[1])

        def fun(x):
            return kernel(x.T, X.T).dot(alpha)[:, np.newaxis]
            # return np.cos(2*np.pi*x.sum(axis=0)/num_vars)[:, np.newaxis]

        errors = []
        validation_samples = pya.generate_independent_random_samples(
            pya.IndependentMultivariateRandomVariable(univariate_variables),
            100)
        validation_values = fun(validation_samples)

        def callback(gp):
            gp_vals = gp(validation_samples)
            assert gp_vals.shape == validation_values.shape
            error = np.linalg.norm(gp_vals - validation_values
                                   ) / np.linalg.norm(validation_values)
            print(error, gp.y_train_.shape[0])
            errors.append(error)

        weight_function = partial(
            pya.tensor_product_pdf,
            univariate_pdfs=[v.pdf for v in univariate_variables])

        gp = adaptive_approximate(
            fun, univariate_variables, "gaussian_process", {
                "nu": nu,
                "noise_level": None,
                "normalize_y": True,
                "alpha": 1e-10,
                "normalize_inputs": True,
                "weight_function": weight_function,
                "ncandidate_samples": 1e3,
                "callback": callback
            }).approx

        # import matplotlib.pyplot as plt
        # plt.plot(gp.X_train_.T[0, :], 0*gp.X_train_.T[0, :], "s")
        # plt.plot(gp.get_training_samples()[0, :], 0*gp.get_training_samples()[0, :], "x")
        # plt.plot(gp.sampler.candidate_samples[0, :], 0*gp.sampler.candidate_samples[0, :], "^")
        # plt.plot(validation_samples[0, :], validation_values[:, 0], "o")
        # var = univariate_variables[0]
        # lb, ub = var.interval(1)
        # xx = np.linspace(lb, ub, 101)
        # plt.plot(xx, var.pdf(xx), "r-")
        # plt.show()
        print(errors[-1])
        assert errors[-1] < 1e-7
Example #11
    def test_pce_basis_expansion(self):
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree, hcross_strength = 7, 0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(num_vars, degree, hcross_strength))
        num_samples = poly.num_terms() * 2
        degrees = poly.indices.sum(axis=0)
        poly.set_coefficients((np.random.normal(0, 1, poly.indices.shape[1]) /
                               (degrees + 1)**2)[:, np.newaxis])
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        true_poly = poly

        poly = approximate(train_samples, train_vals, 'polynomial_chaos', {
            'basis_type': 'expanding_basis',
            'variable': variable
        })

        num_validation_samples = 100
        validation_samples = pya.generate_independent_random_samples(
            variable, num_validation_samples)
        validation_samples = train_samples
        error = np.linalg.norm(
            poly(validation_samples) -
            true_poly(validation_samples)) / np.sqrt(num_validation_samples)
        assert np.allclose(poly(validation_samples),
                           true_poly(validation_samples),
                           atol=1e-8), error
Example #12
    def test_approximate_neural_network(self):
        np.random.seed(2)
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        nvars = benchmark.variable.num_vars()
        nqoi = 1
        maxiter = 30000
        print(benchmark.variable)
        # var_trans = pya.AffineRandomVariableTransformation(
        #      [stats.uniform(-2, 4)]*nvars)
        var_trans = pya.AffineRandomVariableTransformation(benchmark.variable)
        network_opts = {
            "activation_func": "sigmoid",
            "layers": [nvars, 75, nqoi],
            "loss_func": "squared_loss",
            "var_trans": var_trans,
            "lag_mult": 0
        }
        optimizer_opts = {
            "method": "L-BFGS-B",
            "options": {
                "maxiter": maxiter,
                "iprint": -1,
                "gtol": 1e-6
            }
        }
        opts = {
            "network_opts": network_opts,
            "verbosity": 3,
            "optimizer_opts": optimizer_opts
        }
        ntrain_samples = 500
        train_samples = pya.generate_independent_random_samples(
            var_trans.variable, ntrain_samples)
        train_samples = var_trans.map_from_canonical_space(
            np.cos(np.random.uniform(0, np.pi, (nvars, ntrain_samples))))
        train_vals = benchmark.fun(train_samples)

        opts = {
            "network_opts": network_opts,
            "verbosity": 3,
            "optimizer_opts": optimizer_opts,
            "x0": 10
        }
        approx = approximate(train_samples, train_vals, "neural_network",
                             opts).approx
        nsamples = 100
        error = compute_l2_error(approx, benchmark.fun, var_trans.variable,
                                 nsamples)
        print(error)
        assert error < 6e-2
Example #13
    def test_cross_validate_pce_degree(self):
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree = 3
        poly.set_indices(pya.compute_hyperbolic_indices(num_vars, degree, 1.0))
        num_samples = poly.num_terms() * 2
        poly.set_coefficients(
            np.random.normal(0, 1, (poly.indices.shape[1], 1)))
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        true_poly = poly

        poly = approximate(train_samples, train_vals, 'polynomial_chaos', {
            'basis_type': 'hyperbolic_cross',
            'variable': variable
        })

        num_validation_samples = 10
        validation_samples = pya.generate_independent_random_samples(
            variable, num_validation_samples)
        assert np.allclose(poly(validation_samples),
                           true_poly(validation_samples))

        poly = copy.deepcopy(true_poly)
        poly, best_degree = cross_validate_pce_degree(poly, train_samples,
                                                      train_vals, 1,
                                                      degree + 2)
        assert best_degree == degree
Example #14
    def test_analyze_sensitivity_polynomial_chaos(self):
        from pyapprox.benchmarks.benchmarks import setup_benchmark
        from pyapprox.approximate import approximate
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)

        num_samples = 1000
        train_samples = pya.generate_independent_random_samples(
            benchmark.variable, num_samples)
        train_vals = benchmark.fun(train_samples)

        pce = approximate(
            train_samples, train_vals, 'polynomial_chaos', {
                'basis_type': 'hyperbolic_cross',
                'variable': benchmark.variable,
                'options': {
                    'max_degree': 8
                }
            }).approx

        res = analyze_sensitivity_polynomial_chaos(pce)
        assert np.allclose(res.main_effects, benchmark.main_effects, atol=2e-3)
Example #15
    def test_cross_validate_approximation_after_regularization_selection(self):
        """
        This test is useful as it shows how to use cross_validate_approximation
        to produce a list of approximations on each cross-validation fold
        once regularization parameters have been chosen.
        These can be used to show the variance in predictions of values,
        sensitivity indices, etc.

        Ideally this could be avoided if sklearn stored the coefficients
        and alphas for each fold; then we could just find the coefficients
        that correspond to the first time the path drops below the best_alpha.
        """
        num_vars = 2
        univariate_variables = [stats.uniform(-1, 2)] * num_vars
        variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        var_trans = pya.AffineRandomVariableTransformation(variable)
        poly = pya.PolynomialChaosExpansion()
        poly_opts = pya.define_poly_options_from_variable_transformation(
            var_trans)
        poly.configure(poly_opts)

        degree, hcross_strength = 7, 0.4
        poly.set_indices(
            pya.compute_hyperbolic_indices(num_vars, degree, hcross_strength))
        num_samples = poly.num_terms() * 2
        degrees = poly.indices.sum(axis=0)
        coef = np.random.normal(
            0, 1, (poly.indices.shape[1], 2)) / (degrees[:, np.newaxis] + 1)**2
        # set some coefficients to zero to make sure that different qoi
        # are treated correctly.
        II = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[II, 0] = 0
        II = np.random.permutation(coef.shape[0])[:coef.shape[0] // 2]
        coef[II, 1] = 0
        poly.set_coefficients(coef)
        train_samples = pya.generate_independent_random_samples(
            variable, num_samples)
        train_vals = poly(train_samples)
        # true_poly = poly

        result = approximate(train_samples, train_vals, "polynomial_chaos", {
            "basis_type": "expanding_basis",
            "variable": variable
        })

        # Even with the same folds, iterative methods such as Lars, LarsLasso
        # and OMP will not produce exactly the same cv_score from approximate
        # and cross_validate_approximation, because iterative methods
        # interpolate residuals to compute cross-validation scores
        nfolds = 10
        linear_solver_options = [{
            "alpha": result.reg_params[0]
        }, {
            "alpha": result.reg_params[1]
        }]
        indices = [
            result.approx.indices[:, np.where(np.absolute(c) > 0)[0]]
            for c in result.approx.coefficients.T
        ]
        options = {
            "basis_type": "fixed",
            "variable": variable,
            "options": {
                "linear_solver_options": linear_solver_options,
                "indices": indices
            }
        }
        approx_list, residues_list, cv_score = \
            cross_validate_approximation(
                train_samples, train_vals, options, nfolds, "polynomial_chaos",
                random_folds="sklearn")

        assert (np.all(cv_score < 6e-14) and np.all(result.scores < 4e-13))
Example #16
# Here we have intentionally set the coefficients :math:`c` of the Genz function to be highly anisotropic, to emphasize the properties of the adaptive algorithm.
#
# PCE represent the model output :math:`f(\V{\rv})` as an expansion in orthonormal polynomials,
#
# .. math::
#
#   \begin{align*}
#   f(\V{\rv}) &\approx f_N(\V{\rv}) = \sum_{\lambda\in\Lambda}\alpha_{\lambda}\phi_{\lambda}(\V{\rv}), & |\Lambda| &= N.
#   \end{align*}
#
# where :math:`\lambda=(\lambda_1,\ldots,\lambda_d)\in\mathbb{N}_0^d` is a multi-index and :math:`\Lambda` specifies the terms included in the expansion. In :ref:`Polynomial Chaos Regression` we set :math:`\Lambda` to be a total degree expansion. This choice was somewhat arbitrary. The exact indices in :math:`\Lambda` should be chosen with more care. The number of terms in a PCE dictates how many samples are needed to accurately compute the coefficients of the expansion. Consequently, we should choose the index set :math:`\Lambda` in a way that minimizes error for a fixed computational budget. In this tutorial we use an adaptive algorithm to construct an index set that greedily minimizes the error in the PCE.
#
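# A quick aside (not part of the original tutorial; it assumes pyapprox has been imported as ``pya``, and ``example_indices`` is a name introduced here): with the hyperbolic cross parameter set to 1.0, ``pya.compute_hyperbolic_indices`` returns exactly the total-degree index set :math:`\Lambda=\{\lambda : \|\lambda\|_1\le d\}` used in :ref:`Polynomial Chaos Regression`. The adaptive algorithm below instead grows :math:`\Lambda` greedily.
example_indices = pya.compute_hyperbolic_indices(2, 3, 1.0)
# 10 total-degree terms for degree 3 and 2 variables, i.e. binom(2+3, 2)
print(example_indices.shape[1])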
#Before starting the adaptive algorithm, we will generate some test data to estimate the error in the PCE as the adaptive algorithm evolves. We will compute the error at each step using a callback function.

var_trans = pya.AffineRandomVariableTransformation(variable)
validation_samples = pya.generate_independent_random_samples(
    var_trans.variable, int(1e3))
validation_values = model(validation_samples)

errors = []
num_samples = []


def callback(pce):
    error = compute_l2_error(validation_samples, validation_values, pce)
    errors.append(error)
    num_samples.append(pce.samples.shape[1])


# %%
# Now we set up the adaptive algorithm.
Example #17
model = benchmark.fun
variable = benchmark.variable

#%%
#First define the levels of the multi-level model we will use. We will skip level 0 and use levels 1, 2, 3, and 4. Thus we must define a transformation that converts the sparse grid indices, which start at 0, to these levels. We can do this with
from pyapprox.adaptive_sparse_grid import ConfigureVariableTransformation

level_indices = [[1, 2, 3, 4]]
config_var_trans = ConfigureVariableTransformation(level_indices)

#%%
# Before building the sparse grid approximation, let us define a callback to compute the error and total cost at each step of the sparse grid construction. To do this we will precompute some validation data. Specifically, we will evaluate the model using a discretization one level higher than the discretization used to construct the sparse grid. We first generate random samples and then append the configure variable to each of these samples.

validation_level = level_indices[0][-1] + 1
nvalidation_samples = 20
random_validation_samples = pya.generate_independent_random_samples(
    variable, nvalidation_samples)
validation_samples = np.vstack([
    random_validation_samples, validation_level * np.ones(
        (1, nvalidation_samples))
])
validation_values = model(validation_samples)

#print(model.work_tracker.costs)

errors, total_cost = [], []


def callback(approx):
    approx_values = approx.evaluate_using_all_data(validation_samples)
    error = np.linalg.norm(validation_values - approx_values) / np.sqrt(
        validation_samples.shape[1])
Example #18
    for lb, ub in zip(model.ranges[::2], model.ranges[1::2])
]
prior_variable = pya.IndependentMultivariateRandomVariable(
    univariate_variables)
prior_pdf = lambda x: np.prod(prior_variable.evaluate('pdf', x), axis=0)
mean = 0.3
variance = 0.025**2
obs_variable = norm(mean, np.sqrt(variance))
obs_pdf = lambda y: obs_variable.pdf(y).squeeze()

#%%
#PFI requires the push forward of the prior :math:`\pi_\text{model}(f(\rv))`. Let's approximate this PDF using a Gaussian kernel density estimate built on a large number of model outputs evaluated at random samples of the prior.

# Define samples used to evaluate the push forward of the prior
num_prior_samples = 10000
prior_samples = pya.generate_independent_random_samples(
    prior_variable, num_prior_samples)
response_vals_at_prior_samples = model(prior_samples)

# Construct a KDE of the push forward of the prior through the model
push_forward_kde = kde(response_vals_at_prior_samples.T)
push_forward_pdf = lambda y: push_forward_kde(y.T).squeeze()

#%%
#We can now simply evaluate
#
#.. math:: \pi_\text{post}(\rv)=\pi_\text{pr}(\rv)\frac{\pi_\text{obs}(f(\rv))}{\hat{\pi}_\text{model}(f(\rv))}
#
#using the approximate push forward PDF :math:`\hat{\pi}_\text{model}(f(\rv))`. Let's use this fact to plot the posterior density.

# Define the samples at which to evaluate the posterior density
num_pts_1d = 50
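#%%
# A minimal sketch (not the tutorial's plotting code; it reuses ``model``,
# ``prior_pdf``, ``obs_pdf`` and ``push_forward_pdf`` defined above, and the
# function name is introduced here for illustration): the PFI posterior density
# is the prior times the ratio of the observed density and the push-forward
# density, both evaluated at the model output.
def approx_posterior_pdf(samples):
    response_vals = model(samples)
    return (prior_pdf(samples) * obs_pdf(response_vals) /
            push_forward_pdf(response_vals))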
Example #19
import numpy as np
import matplotlib.pyplot as plt
from pyapprox.tests.test_control_variate_monte_carlo import TunableModelEnsemble
from scipy.stats import uniform

np.random.seed(1)
univariate_variables = [uniform(-1, 2), uniform(-1, 2)]
variable = pya.IndependentMultivariateRandomVariable(univariate_variables)
print(variable)
shifts = [.1, .2]
model = TunableModelEnsemble(np.pi / 2 * .95, shifts=shifts)

#%%
# Now let us compute the mean of :math:`f_1` using Monte Carlo
nsamples = int(1e3)
samples = pya.generate_independent_random_samples(variable, nsamples)
values = model.m1(samples)
pya.print_statistics(samples, values)

#%%
# We can compute the exact mean using sympy and compute the MC MSE
import sympy as sp
z1, z2 = sp.Symbol('z1'), sp.Symbol('z2')
ranges = [-1, 1, -1, 1]
integrand_f1 = model.A1 * (sp.cos(model.theta1) * z1**3 +
                           sp.sin(model.theta1) * z2**3) + shifts[0] * 0.25
exact_integral_f1 = float(
    sp.integrate(integrand_f1, (z1, ranges[0], ranges[1]),
                 (z2, ranges[2], ranges[3])))

print('MC difference squared =', (values.mean() - exact_integral_f1)**2)
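#%%
# A short extension (a sketch, not part of the original example; ``f1_sym`` and
# ``exact_var_f1`` are names introduced here): the mean squared error of the MC
# estimate of the mean is :math:`\text{Var}[f_1]/N`, and the variance can also
# be computed exactly with sympy using the uniform density :math:`1/4` on
# :math:`[-1,1]^2`.
f1_sym = model.A1 * (sp.cos(model.theta1) * z1**3 +
                     sp.sin(model.theta1) * z2**3) + shifts[0]
exact_second_moment_f1 = float(
    sp.integrate(f1_sym**2 * sp.Rational(1, 4), (z1, ranges[0], ranges[1]),
                 (z2, ranges[2], ranges[3])))
exact_var_f1 = exact_second_moment_f1 - exact_integral_f1**2
print('Exact MC MSE =', exact_var_f1 / nsamples)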
Example #20
   Horizontal load :math:`X` :math:`N(500,100)`
   Vertical Load   :math:`Y` :math:`N(1000,100)`
   =============== ========= =======================

First we must specify the distribution of the random variables
"""
import numpy as np
import pyapprox as pya
from pyapprox.benchmarks.benchmarks import setup_benchmark
from functools import partial
from pyapprox.optimization import *
benchmark = setup_benchmark('cantilever_beam')

from pyapprox.models.wrappers import ActiveSetVariableModel
nsamples = 10
samples = pya.generate_independent_random_samples(benchmark.variable, nsamples)
fun = ActiveSetVariableModel(
    benchmark.fun,
    benchmark.variable.num_vars() + benchmark.design_variable.num_vars(),
    samples, benchmark.design_var_indices)
jac = ActiveSetVariableModel(
    benchmark.jac,
    benchmark.variable.num_vars() + benchmark.design_variable.num_vars(),
    samples, benchmark.design_var_indices)

generate_random_samples = partial(pya.generate_independent_random_samples,
                                  benchmark.variable, 100)
# Set the seed so that the finite-difference Jacobian always uses the same set
# of samples for each step size and the same samples as are used to compute
# the exact gradient
seed = 1
generate_sample_data = partial(generate_monte_carlo_quadrature_data,
Example #21
plt.plot(xx[0, :], f2(xx), label=r'$f_2$', c='r')
plt.plot(xx[0, :], f3(xx), label=r'$f_3$', c='k')
plt.legend()
plt.show()

#%%
#Now setup the polynomial approximations of each information source
degrees = [5] * nmodels
polys, nparams = get_total_degree_polynomials(ensemble_univariate_variables,
                                              degrees)

#%%
#Next generate the training data. Here we will set the noise to be independent Gaussian with mean zero and variance :math:`0.01^2`.
nsamples = [20, 20, 3]
samples_train = [
    pya.generate_independent_random_samples(p.var_trans.variable, n)
    for p, n in zip(polys, nsamples)
]
noise_std = [0.01] * nmodels
# Draw iid noise with standard deviation noise_std[ii] for each model
noise = [
    noise_std[ii] * np.random.normal(0, 1,
                                     (samples_train[ii].shape[1], 1))
    for ii in range(nmodels)
]
values_train = [f(s) + n for s, f, n in zip(samples_train, functions, noise)]

#%%
#In the following we will assume a Gaussian prior on the coefficients of each approximation. Because the noise is also Gaussian and we are using linear subspace models, the posterior of the approximation coefficients will also be Gaussian.
#
#With the goal of applying classical formulas for the posterior of Gaussian-linear models, let's first define the linear model that involves all information sources
#
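#A generic sketch of the classical Gaussian-linear update referred to above
#(plain numpy, not pyapprox's multi-fidelity machinery; the function and all
#variable names are illustrative). For a linear model :math:`y = Ax + e` with
#prior :math:`x\sim N(m_0, S_0)` and noise :math:`e\sim N(0, S_n)`, the
#posterior is Gaussian with covariance :math:`(S_0^{-1}+A^T S_n^{-1}A)^{-1}`
#and mean :math:`\Sigma_\text{post}(A^T S_n^{-1} y + S_0^{-1} m_0)`.
def gaussian_linear_posterior(A, y, m0, S0, Sn):
    S0_inv, Sn_inv = np.linalg.inv(S0), np.linalg.inv(Sn)
    post_cov = np.linalg.inv(S0_inv + A.T @ Sn_inv @ A)
    post_mean = post_cov @ (A.T @ Sn_inv @ y + S0_inv @ m0)
    return post_mean, post_cov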
Example #22
    def test_sampling_based_sobol_indices_from_gaussian_process(self):
        from pyapprox.benchmarks.benchmarks import setup_benchmark
        from pyapprox.approximate import approximate
        benchmark = setup_benchmark("ishigami", a=7, b=0.1)
        nvars = benchmark.variable.num_vars()

        # nsobol_samples and ntrain_samples affect the assert tolerances
        ntrain_samples = 500
        nsobol_samples = int(1e4)
        train_samples = pya.generate_independent_random_samples(
            benchmark.variable, ntrain_samples)
        # from pyapprox import CholeskySampler
        # sampler = CholeskySampler(nvars, 10000, benchmark.variable)
        # kernel = pya.Matern(
        #     np.array([1]*nvars), length_scale_bounds='fixed', nu=np.inf)
        # sampler.set_kernel(kernel)
        # train_samples = sampler(ntrain_samples)[0]

        train_vals = benchmark.fun(train_samples)
        approx = approximate(train_samples, train_vals, 'gaussian_process', {
            'nu': np.inf,
            'normalize_y': True
        }).approx

        from pyapprox.approximate import compute_l2_error
        error = compute_l2_error(approx,
                                 benchmark.fun,
                                 benchmark.variable,
                                 nsobol_samples,
                                 rel=True)
        print('error', error)
        # assert error < 4e-2

        order = 2
        interaction_terms = compute_hyperbolic_indices(nvars, order)
        interaction_terms = interaction_terms[:,
                                              np.where(
                                                  interaction_terms.max(
                                                      axis=0) == 1)[0]]

        result = sampling_based_sobol_indices_from_gaussian_process(
            approx,
            benchmark.variable,
            interaction_terms,
            nsobol_samples,
            sampling_method='sobol',
            ngp_realizations=1000,
            normalize=True,
            nsobol_realizations=3,
            stat_functions=(np.mean, np.std),
            ninterpolation_samples=1000,
            ncandidate_samples=2000)

        mean_mean = result['mean']['mean']
        mean_sobol_indices = result['sobol_indices']['mean']
        mean_total_effects = result['total_effects']['mean']
        mean_main_effects = mean_sobol_indices[:nvars]

        print(benchmark.mean - mean_mean)
        print(benchmark.main_effects[:, 0] - mean_main_effects)
        print(benchmark.total_effects[:, 0] - mean_total_effects)
        print(benchmark.sobol_indices[:-1, 0] - mean_sobol_indices)
        assert np.allclose(mean_mean, benchmark.mean, atol=3e-2)
        assert np.allclose(mean_main_effects,
                           benchmark.main_effects[:, 0],
                           atol=1e-2)
        assert np.allclose(mean_total_effects,
                           benchmark.total_effects[:, 0],
                           atol=1e-2)
        assert np.allclose(mean_sobol_indices,
                           benchmark.sobol_indices[:-1, 0],
                           atol=1e-2)